/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.22"
#define DRV_MODULE_RELDATE	"2008/09/09"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
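
/* Note: both accessors above go through the PCI config window for
 * indirect GRC access: PCICFG_GRC_ADDRESS selects the target register,
 * the data moves through PCICFG_GRC_DATA, and the window is pointed
 * back at PCICFG_VENDOR_ID_OFFSET afterwards, presumably so that a
 * stray config cycle cannot touch a live register.
 */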

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
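
/* Note: a DMAE command is a fixed-size block of u32s.  bnx2x_post_dmae()
 * copies it word by word into the command cell of channel 'idx' inside
 * DMAE_REG_CMD_MEM and then kicks the channel through its GO register;
 * serializing access to the channel is left to the caller (see the
 * dmae_mutex usage below).
 */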

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
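
/* Note: completion is detected by polling - the DMAE engine writes
 * DMAE_COMP_VAL into the wb_comp slowpath word when the copy is done,
 * and the loop above spins (up to 200 iterations, with a longer sleep
 * on emulation/FPGA chips) until that value shows up or the timeout
 * expires.
 */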

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
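
/* Note: each of the four storm processors (X/T/C/U) keeps its own
 * assert list in internal memory; a four-word entry whose first word is
 * not COMMON_ASM_INVALID_ASSERT_OPCODE is a real assert.  The value
 * returned is the total number of asserts found across all storms.
 */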

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
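
/* Note: acknowledging a status block is a single 32-bit write of the
 * igu_ack_register image to the per-port IGU command register; it
 * carries the status block id, the storm, the new consumer index and
 * the interrupt mode (enable/disable) in one shot.
 */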

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
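
/* Note: the return value above is a bitmask - bit 0 set means the
 * CSTORM index moved, bit 1 set means the USTORM index moved, so 0
 * tells the caller that the status block carries no new events.
 */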

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
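
/* Note: nbd counts all BDs of the packet including the parse BD and a
 * possible TSO split header BD, which carry no DMA mapping; the loop
 * above therefore skips them before unmapping the data frags.  The
 * value returned (first_bd + nbd) becomes the new BD consumer.
 */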

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
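
/* Note: the "next page" BDs are never given to the stack, so they are
 * folded into the used count here; the value returned is therefore a
 * conservative (lower-bound) estimate of the BDs start_xmit() may
 * still consume.
 */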

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
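
/* Note: re-checking netif_queue_stopped() under netif_tx_lock() closes
 * the race with a concurrent start_xmit() that is about to stop the
 * queue - the queue is only woken while it is still stopped, the
 * device is in the OPEN state and at least MAX_SKB_FRAGS + 3 BDs are
 * free.
 */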

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
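
/* Note: the SGE mask is a bitmap with one bit per SGE entry, grouped
 * into 64-bit elements.  Completed entries are cleared as they are
 * seen and the producer is advanced only over fully-cleared elements,
 * so a page still owned by the driver is never handed back to the
 * hardware.
 */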

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
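
/* Note: all three RX producers (BD, CQE and SGE) live in one tstorm
 * structure and are written together, 32 bits at a time, into TSTORM
 * internal memory; updating them in a single shot keeps the firmware's
 * view of the rings consistent.
 */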

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
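
/* Note: bnx2x_rx_int() is the NAPI poll body for one fastpath - it
 * consumes up to 'budget' completions, refills the BD ring as it goes
 * and publishes the new producers once at the end, so the number of
 * MMIO writes per poll stays constant no matter how many packets were
 * handled.
 */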

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
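
/* Note: the HW lock registers implement a per-resource-bit
 * test-and-set - writing the bit to hw_lock_control_reg + 4 attempts
 * the acquire and reading the register back shows whether it was
 * granted.  The loop above retries every 5ms for up to 5 seconds.
 */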
a2fbb9ea 1732
4a37fb66 1733static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1734{
1735 u32 lock_status;
1736 u32 resource_bit = (1 << resource);
4a37fb66
YG
1737 int func = BP_FUNC(bp);
1738 u32 hw_lock_control_reg;
a2fbb9ea 1739
c18487ee
YR
1740 /* Validating that the resource is within range */
1741 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1742 DP(NETIF_MSG_HW,
1743 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1744 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1745 return -EINVAL;
1746 }
1747
4a37fb66
YG
1748 if (func <= 5) {
1749 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1750 } else {
1751 hw_lock_control_reg =
1752 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1753 }
1754
c18487ee 1755 /* Validating that the resource is currently taken */
4a37fb66 1756 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1757 if (!(lock_status & resource_bit)) {
1758 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1759 lock_status, resource_bit);
1760 return -EFAULT;
a2fbb9ea
ET
1761 }
1762
4a37fb66 1763 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1764 return 0;
1765}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
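
/* Worked example (sketch): for gpio_num 1 when the swap logic selects the
 * other port, gpio_shift = 1 + MISC_REGISTERS_GPIO_PORT_SHIFT, and
 * MISC_REGISTERS_GPIO_OUTPUT_HIGH clears bit gpio_shift of the FLOAT field
 * and sets bit gpio_shift of the SET field in a single MISC_REG_GPIO write.
 */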

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
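
/* For instance (sketch): with two visible vns whose MIN_BW fields are 10 and
 * 30, wsum = 10*100 + 30*100 = 4000.  If every field were 0 the function
 * would return 0 and fairness would be switched off, while a mix of zero
 * and non-zero fields replaces each zero with DEF_MIN_RATE before summing.
 */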

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
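
/* Numerically (sketch, assuming T_FAIR_COEF = 10000000 so that the 10G/1G
 * comment above holds): at port_rate = 10000 Mbps, r_param = 1250 bytes/usec
 * and t_fair = 1000 usec, so rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 *
 * 5/4 bytes - 25% above what one period can carry, absorbing timer
 * inaccuracy.
 */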

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vns share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
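
/* Credit example (sketch, same T_FAIR_COEF assumption as above): with
 * wsum = 4000, T_FAIR_COEF / (8 * wsum) = 312 bytes per min-rate unit, so a
 * vn with vn_min_rate = 1000 gets vn_credit_delta =
 * max(312000, 2 * fair_threshold) bytes per fairness period.
 */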

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
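
/* Usage (taken from bnx2x_storm_stats_post() below): a statistics query
 * ramrod is posted on CID 0 with the ramrod data split into high/low words,
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * and, as noted above, its completion arrives on the fastpath ring.
 */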

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
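
/* The returned bitmap encodes which default status block indices advanced:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM,
 * bit 4 - TSTORM.  bnx2x_sp_task() below tests bit 0 (status & 0x1) for HW
 * attentions and bit 1 (status & 0x2) for CStorm events.
 */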

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
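
/* Note the parentheses around the carry: "a_hi + (s_lo < a_lo) ? 1 : 0"
 * would bind as "(a_hi + (s_lo < a_lo)) ? 1 : 0" under C precedence and
 * corrupt the high word.  Worked example: adding a = 0x0:0x1 to
 * s = 0x1:0xffffffff wraps s_lo to 0, the compare supplies the carry, and
 * s becomes 0x2:0x0.
 */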

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
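
/* Borrow example: m = 0x2:0x00000000 minus s = 0x1:0x00000001 underflows the
 * low word, so one is loaned from d_hi and d_lo = 0 + (UINT_MAX - 1) + 1 =
 * 0xffffffff, giving 0x0:0xffffffff.  A negative difference clamps to 0:0
 * rather than wrapping.
 */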

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
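
/* Reading the macro: mac_stx[0] keeps the last raw MAC snapshot and
 * mac_stx[1] the running total; each update adds the 64-bit delta between
 * the new snapshot and mac_stx[0], so the totals survive the old-snapshot
 * reset done on link-up in bnx2x_link_attn() above.
 */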

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
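
/* On 64-bit builds this folds a {hi, lo} pair laid out hi-first into one
 * long via HILO_U64(hi, lo) (presumably hi << 32 | lo); on 32-bit builds
 * only the low word is reported.  E.g. hi = 0x1, lo = 0x2 yields
 * 0x100000002 or 0x2 respectively.
 */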

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3440static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441{
3442 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444 struct regpair diff;
3445
3446 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3452 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3453 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458 UPDATE_STAT64(tx_stat_gt127,
3459 tx_stat_etherstatspkts65octetsto127octets);
3460 UPDATE_STAT64(tx_stat_gt255,
3461 tx_stat_etherstatspkts128octetsto255octets);
3462 UPDATE_STAT64(tx_stat_gt511,
3463 tx_stat_etherstatspkts256octetsto511octets);
3464 UPDATE_STAT64(tx_stat_gt1023,
3465 tx_stat_etherstatspkts512octetsto1023octets);
3466 UPDATE_STAT64(tx_stat_gt1518,
3467 tx_stat_etherstatspkts1024octetsto1522octets);
3468 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472 UPDATE_STAT64(tx_stat_gterr,
3473 tx_stat_dot3statsinternalmactransmiterrors);
3474 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475}
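
/*
 * Editor's sketch (not driver code; all names hypothetical): the
 * UPDATE_STAT64/ADD_64 family used above keeps every statistic as a
 * hi/lo pair of u32s and adds pairs with a manual carry out of the
 * low word, as in this minimal C99 model:
 */
#include <stdint.h>
#include <stdio.h>

struct stat64 { uint32_t hi, lo; };

/* add (d_hi:d_lo) into s, propagating the carry from the low word */
static void add_64(struct stat64 *s, uint32_t d_hi, uint32_t d_lo)
{
	uint32_t old_lo = s->lo;

	s->lo += d_lo;
	s->hi += d_hi + (s->lo < old_lo);	/* carry iff lo wrapped */
}

int main(void)
{
	struct stat64 s = { 0, 0xfffffff0u };

	add_64(&s, 0, 0x20);			/* forces a low-word wrap */
	printf("%08x:%08x\n", s.hi, s.lo);	/* 00000001:00000010 */
	return 0;
}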
3476
3477static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478{
3479 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513}
3514
3515static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516{
3517 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518 struct nig_stats *old = &(bp->port.old_nig_stats);
3519 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521 struct regpair diff;
3522
3523 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524 bnx2x_bmac_stats_update(bp);
3525
3526 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527 bnx2x_emac_stats_update(bp);
3528
3529 else { /* unreached */
3530 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531 return -1;
3532 }
a2fbb9ea 3533
bb2a0f7a
YG
3534 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535 new->brb_discard - old->brb_discard);
66e855f3
YG
3536 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3538
bb2a0f7a
YG
3539 UPDATE_STAT64_NIG(egress_mac_pkt0,
3540 etherstatspkts1024octetsto1522octets);
3541 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3542
bb2a0f7a 3543 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3544
bb2a0f7a
YG
3545 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546 sizeof(struct mac_stx));
3547 estats->brb_drop_hi = pstats->brb_drop_hi;
3548 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3549
bb2a0f7a 3550 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3551
bb2a0f7a 3552 return 0;
a2fbb9ea
ET
3553}
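
/*
 * Editor's sketch (hypothetical names): bnx2x_hw_stats_update() treats
 * the NIG counters as free-running -- it folds (new - old) into the
 * software totals and then snapshots new into old, so unsigned
 * subtraction keeps the delta correct across a 32-bit wrap:
 */
#include <stdint.h>
#include <string.h>

struct nig_snap { uint32_t brb_discard; };

static void accumulate(uint64_t *total, struct nig_snap *old,
		       const struct nig_snap *latest)
{
	/* wrap-safe: the delta is taken modulo 2^32 */
	*total += (uint32_t)(latest->brb_discard - old->brb_discard);
	memcpy(old, latest, sizeof(*old));
}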
3554
bb2a0f7a 3555static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3556{
3557 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3558 int cl_id = BP_CL_ID(bp);
3559 struct tstorm_per_port_stats *tport =
3560 &stats->tstorm_common.port_statistics;
a2fbb9ea 3561 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3562 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3563 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3564 struct xstorm_per_client_stats *xclient =
3565 &stats->xstorm_common.client_statistics[cl_id];
3566 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3569 u32 diff;
3570
bb2a0f7a
YG
3571 /* are storm stats valid? */
3572 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573 bp->stats_counter) {
3574 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575 " tstorm counter (%d) != stats_counter (%d)\n",
3576 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3577 return -1;
3578 }
bb2a0f7a
YG
3579 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580 bp->stats_counter) {
3581 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582 " xstorm counter (%d) != stats_counter (%d)\n",
3583 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3584 return -2;
3585 }
a2fbb9ea 3586
bb2a0f7a
YG
3587 fstats->total_bytes_received_hi =
3588 fstats->valid_bytes_received_hi =
a2fbb9ea 3589 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3590 fstats->total_bytes_received_lo =
3591 fstats->valid_bytes_received_lo =
a2fbb9ea 3592 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3593
3594 estats->error_bytes_received_hi =
3595 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596 estats->error_bytes_received_lo =
3597 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598 ADD_64(estats->error_bytes_received_hi,
3599 estats->rx_stat_ifhcinbadoctets_hi,
3600 estats->error_bytes_received_lo,
3601 estats->rx_stat_ifhcinbadoctets_lo);
3602
3603 ADD_64(fstats->total_bytes_received_hi,
3604 estats->error_bytes_received_hi,
3605 fstats->total_bytes_received_lo,
3606 estats->error_bytes_received_lo);
3607
3608 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3609 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3610 total_multicast_packets_received);
a2fbb9ea 3611 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3612 total_broadcast_packets_received);
3613
3614 fstats->total_bytes_transmitted_hi =
3615 le32_to_cpu(xclient->total_sent_bytes.hi);
3616 fstats->total_bytes_transmitted_lo =
3617 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620 total_unicast_packets_transmitted);
3621 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622 total_multicast_packets_transmitted);
3623 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624 total_broadcast_packets_transmitted);
3625
3626 memcpy(estats, &(fstats->total_bytes_received_hi),
3627 sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631 estats->brb_truncate_discard =
3632 le32_to_cpu(tport->brb_truncate_discard);
3633 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3636 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3637 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3638 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3639 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3640 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3641 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3642 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3643 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3644 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3645 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3646 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3647 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3648
bb2a0f7a
YG
3649 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650 old_tclient->packets_too_big_discard =
a2fbb9ea 3651 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3652 estats->no_buff_discard =
3653 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657 old_xclient->unicast_bytes_sent.hi =
3658 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659 old_xclient->unicast_bytes_sent.lo =
3660 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661 old_xclient->multicast_bytes_sent.hi =
3662 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663 old_xclient->multicast_bytes_sent.lo =
3664 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665 old_xclient->broadcast_bytes_sent.hi =
3666 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667 old_xclient->broadcast_bytes_sent.lo =
3668 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3671
3672 return 0;
3673}
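
/*
 * Editor's sketch: the "are storm stats valid?" tests above check that
 * the counter the firmware stamped into the snapshot is exactly one
 * behind the driver's request counter, modulo 2^16 so the comparison
 * survives wraparound (le16_to_cpu omitted here; hypothetical names):
 */
#include <stdint.h>
#include <stdbool.h>

static bool storm_stats_current(uint16_t stamped, uint16_t requested)
{
	return (uint16_t)(stamped + 1) == requested;
}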
3674
bb2a0f7a 3675static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3676{
bb2a0f7a
YG
3677 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3679 struct net_device_stats *nstats = &bp->dev->stats;
3680
3681 nstats->rx_packets =
3682 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686 nstats->tx_packets =
3687 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
bb2a0f7a 3691 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3692
0e39e645 3693 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3694
bb2a0f7a
YG
3695 nstats->rx_dropped = old_tclient->checksum_discard +
3696 estats->mac_discard;
a2fbb9ea
ET
3697 nstats->tx_dropped = 0;
3698
3699 nstats->multicast =
3700 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3701
bb2a0f7a
YG
3702 nstats->collisions =
3703 estats->tx_stat_dot3statssinglecollisionframes_lo +
3704 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705 estats->tx_stat_dot3statslatecollisions_lo +
3706 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3707
bb2a0f7a
YG
3708 estats->jabber_packets_received =
3709 old_tclient->packets_too_big_discard +
3710 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712 nstats->rx_length_errors =
3713 estats->rx_stat_etherstatsundersizepkts_lo +
3714 estats->jabber_packets_received;
66e855f3 3715 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3716 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3719 nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721 nstats->rx_errors = nstats->rx_length_errors +
3722 nstats->rx_over_errors +
3723 nstats->rx_crc_errors +
3724 nstats->rx_frame_errors +
0e39e645
ET
3725 nstats->rx_fifo_errors +
3726 nstats->rx_missed_errors;
a2fbb9ea 3727
bb2a0f7a
YG
3728 nstats->tx_aborted_errors =
3729 estats->tx_stat_dot3statslatecollisions_lo +
3730 estats->tx_stat_dot3statsexcessivecollisions_lo;
3731 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3732 nstats->tx_fifo_errors = 0;
3733 nstats->tx_heartbeat_errors = 0;
3734 nstats->tx_window_errors = 0;
3735
3736 nstats->tx_errors = nstats->tx_aborted_errors +
3737 nstats->tx_carrier_errors;
a2fbb9ea
ET
3738}
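
/*
 * Editor's sketch: bnx2x_hilo() (defined elsewhere in the driver) folds
 * a hi/lo u32 pair into one wide value for the netdev counters.  A
 * plausible standalone equivalent, assuming the hi word is stored first
 * as the _hi/_lo field ordering in this file suggests:
 */
#include <stdint.h>

static uint64_t hilo(const uint32_t *hi_ref)
{
	return ((uint64_t)hi_ref[0] << 32) | hi_ref[1];
}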
3739
bb2a0f7a 3740static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3741{
bb2a0f7a
YG
3742 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743 int update = 0;
a2fbb9ea 3744
bb2a0f7a
YG
3745 if (*stats_comp != DMAE_COMP_VAL)
3746 return;
3747
3748 if (bp->port.pmf)
3749 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3750
bb2a0f7a 3751 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3752
bb2a0f7a
YG
3753 if (update)
3754 bnx2x_net_stats_update(bp);
a2fbb9ea 3755
bb2a0f7a
YG
3756 else {
3757 if (bp->stats_pending) {
3758 bp->stats_pending++;
3759 if (bp->stats_pending == 3) {
3760 BNX2X_ERR("stats were not updated for 3 consecutive polls\n");
3761 bnx2x_panic();
3762 return;
3763 }
3764 }
a2fbb9ea
ET
3765 }
3766
3767 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3768 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3770 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3771 int i;
a2fbb9ea
ET
3772
3773 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3775 " tx pkt (%lx)\n",
3776 bnx2x_tx_avail(bp->fp),
7a9b2557 3777 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3778 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3779 " rx pkt (%lx)\n",
7a9b2557
VZ
3780 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781 bp->fp->rx_comp_cons),
3782 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3783 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3784 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3785 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3786 printk(KERN_DEBUG "tstats: checksum_discard %u "
3787 "packets_too_big_discard %u no_buff_discard %u "
3788 "mac_discard %u mac_filter_discard %u "
3789 "xxovrflow_discard %u brb_truncate_discard %u "
3790 "ttl0_discard %u\n",
bb2a0f7a
YG
3791 old_tclient->checksum_discard,
3792 old_tclient->packets_too_big_discard,
3793 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3794 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3795 estats->brb_truncate_discard,
3796 old_tclient->ttl0_discard);
a2fbb9ea
ET
3797
3798 for_each_queue(bp, i) {
3799 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800 bnx2x_fp(bp, i, tx_pkt),
3801 bnx2x_fp(bp, i, rx_pkt),
3802 bnx2x_fp(bp, i, rx_calls));
3803 }
3804 }
3805
bb2a0f7a
YG
3806 bnx2x_hw_stats_post(bp);
3807 bnx2x_storm_stats_post(bp);
3808}
a2fbb9ea 3809
bb2a0f7a
YG
3810static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811{
3812 struct dmae_command *dmae;
3813 u32 opcode;
3814 int loader_idx = PMF_DMAE_C(bp);
3815 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3816
bb2a0f7a 3817 bp->executer_idx = 0;
a2fbb9ea 3818
bb2a0f7a
YG
3819 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820 DMAE_CMD_C_ENABLE |
3821 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3822#ifdef __BIG_ENDIAN
bb2a0f7a 3823 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3824#else
bb2a0f7a 3825 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3826#endif
bb2a0f7a
YG
3827 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830 if (bp->port.port_stx) {
3831
3832 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833 if (bp->func_stx)
3834 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835 else
3836 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3840 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3841 dmae->len = sizeof(struct host_port_stats) >> 2;
3842 if (bp->func_stx) {
3843 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844 dmae->comp_addr_hi = 0;
3845 dmae->comp_val = 1;
3846 } else {
3847 dmae->comp_addr_lo =
3848 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849 dmae->comp_addr_hi =
3850 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3852
bb2a0f7a
YG
3853 *stats_comp = 0;
3854 }
a2fbb9ea
ET
3855 }
3856
bb2a0f7a
YG
3857 if (bp->func_stx) {
3858
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->dst_addr_lo = bp->func_stx >> 2;
3864 dmae->dst_addr_hi = 0;
3865 dmae->len = sizeof(struct host_func_stats) >> 2;
3866 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_val = DMAE_COMP_VAL;
3869
3870 *stats_comp = 0;
a2fbb9ea 3871 }
bb2a0f7a
YG
3872}
3873
3874static void bnx2x_stats_stop(struct bnx2x *bp)
3875{
3876 int update = 0;
3877
3878 bnx2x_stats_comp(bp);
3879
3880 if (bp->port.pmf)
3881 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883 update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885 if (update) {
3886 bnx2x_net_stats_update(bp);
a2fbb9ea 3887
bb2a0f7a
YG
3888 if (bp->port.pmf)
3889 bnx2x_port_stats_stop(bp);
3890
3891 bnx2x_hw_stats_post(bp);
3892 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3893 }
3894}
3895
bb2a0f7a
YG
3896static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897{
3898}
3899
3900static const struct {
3901 void (*action)(struct bnx2x *bp);
3902 enum bnx2x_stats_state next_state;
3903} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904/* state event */
3905{
3906/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3908/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910},
3911{
3912/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3913/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3914/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3915/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3916}
3917};
3918
3919static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920{
3921 enum bnx2x_stats_state state = bp->stats_state;
3922
3923 bnx2x_stats_stm[state][event].action(bp);
3924 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928 state, event, bp->stats_state);
3929}
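
/*
 * Editor's sketch: the stats code above is a table-driven finite state
 * machine -- bnx2x_stats_stm[state][event] yields an action plus the
 * next state.  Minimal standalone model with hypothetical states and
 * events:
 */
#include <stdio.h>

enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum event { EV_LINK_UP, EV_STOP, EV_MAX };

static void act_start(void) { puts("start"); }
static void act_stop(void)  { puts("stop"); }
static void act_nop(void)   { }

static const struct {
	void (*action)(void);
	enum state next;
} stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = { [EV_LINK_UP] = { act_start, ST_ENABLED },
			  [EV_STOP]    = { act_nop,   ST_DISABLED } },
	[ST_ENABLED]  = { [EV_LINK_UP] = { act_nop,   ST_ENABLED },
			  [EV_STOP]    = { act_stop,  ST_DISABLED } },
};

static enum state handle(enum state s, enum event e)
{
	stm[s][e].action();
	return stm[s][e].next;
}

int main(void)
{
	enum state s = ST_DISABLED;

	s = handle(s, EV_LINK_UP);	/* prints "start" */
	s = handle(s, EV_STOP);		/* prints "stop"  */
	return (int)s;
}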
3930
a2fbb9ea
ET
3931static void bnx2x_timer(unsigned long data)
3932{
3933 struct bnx2x *bp = (struct bnx2x *) data;
3934
3935 if (!netif_running(bp->dev))
3936 return;
3937
3938 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3939 goto timer_restart;
a2fbb9ea
ET
3940
3941 if (poll) {
3942 struct bnx2x_fastpath *fp = &bp->fp[0];
3943 int rc;
3944
3945 bnx2x_tx_int(fp, 1000);
3946 rc = bnx2x_rx_int(fp, 1000);
3947 }
3948
34f80b04
EG
3949 if (!BP_NOMCP(bp)) {
3950 int func = BP_FUNC(bp);
a2fbb9ea
ET
3951 u32 drv_pulse;
3952 u32 mcp_pulse;
3953
3954 ++bp->fw_drv_pulse_wr_seq;
3955 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956 /* TBD - add SYSTEM_TIME */
3957 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3958 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3959
34f80b04 3960 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3961 MCP_PULSE_SEQ_MASK);
3962 /* The delta between driver pulse and mcp response
3963 * should be 1 (before mcp response) or 0 (after mcp response)
3964 */
3965 if ((drv_pulse != mcp_pulse) &&
3966 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967 /* someone lost a heartbeat... */
3968 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969 drv_pulse, mcp_pulse);
3970 }
3971 }
3972
bb2a0f7a
YG
3973 if ((bp->state == BNX2X_STATE_OPEN) ||
3974 (bp->state == BNX2X_STATE_DISABLED))
3975 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3976
f1410647 3977timer_restart:
a2fbb9ea
ET
3978 mod_timer(&bp->timer, jiffies + bp->current_interval);
3979}
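
/*
 * Editor's sketch: the timer above runs a two-way heartbeat -- the
 * driver bumps a masked sequence number (drv_pulse) and the management
 * CPU echoes it back (mcp_pulse); a healthy pair differs by 0 or 1
 * modulo the mask.  The mask width here is an assumption:
 */
#include <stdint.h>
#include <stdbool.h>

#define PULSE_SEQ_MASK 0x7fff	/* assumed; see DRV_PULSE_SEQ_MASK */

static bool heartbeat_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK));
}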
3980
3981/* end of Statistics */
3982
3983/* nic init */
3984
3985/*
3986 * nic init service functions
3987 */
3988
34f80b04 3989static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3990{
34f80b04
EG
3991 int port = BP_PORT(bp);
3992
3993 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3995 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
3996 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3998 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
3999}
4000
5c862848
EG
4001static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4002 dma_addr_t mapping, int sb_id)
34f80b04
EG
4003{
4004 int port = BP_PORT(bp);
bb2a0f7a 4005 int func = BP_FUNC(bp);
a2fbb9ea 4006 int index;
34f80b04 4007 u64 section;
a2fbb9ea
ET
4008
4009 /* USTORM */
4010 section = ((u64)mapping) + offsetof(struct host_status_block,
4011 u_status_block);
34f80b04 4012 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4013
4014 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4015 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4016 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4017 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4018 U64_HI(section));
bb2a0f7a
YG
4019 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4021
4022 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4024 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4025
4026 /* CSTORM */
4027 section = ((u64)mapping) + offsetof(struct host_status_block,
4028 c_status_block);
34f80b04 4029 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4030
4031 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4032 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4033 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4034 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4035 U64_HI(section));
7a9b2557
VZ
4036 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4038
4039 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4041 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044}
4045
4046static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047{
4048 int func = BP_FUNC(bp);
a2fbb9ea 4049
34f80b04
EG
4050 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052 sizeof(struct ustorm_def_status_block)/4);
4053 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055 sizeof(struct cstorm_def_status_block)/4);
4056 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058 sizeof(struct xstorm_def_status_block)/4);
4059 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4062}
4063
4064static void bnx2x_init_def_sb(struct bnx2x *bp,
4065 struct host_def_status_block *def_sb,
34f80b04 4066 dma_addr_t mapping, int sb_id)
a2fbb9ea 4067{
34f80b04
EG
4068 int port = BP_PORT(bp);
4069 int func = BP_FUNC(bp);
a2fbb9ea
ET
4070 int index, val, reg_offset;
4071 u64 section;
4072
4073 /* ATTN */
4074 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075 atten_status_block);
34f80b04 4076 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4077
49d66772
ET
4078 bp->attn_state = 0;
4079
a2fbb9ea
ET
4080 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4081 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4082
34f80b04 4083 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4084 bp->attn_group[index].sig[0] = REG_RD(bp,
4085 reg_offset + 0x10*index);
4086 bp->attn_group[index].sig[1] = REG_RD(bp,
4087 reg_offset + 0x4 + 0x10*index);
4088 bp->attn_group[index].sig[2] = REG_RD(bp,
4089 reg_offset + 0x8 + 0x10*index);
4090 bp->attn_group[index].sig[3] = REG_RD(bp,
4091 reg_offset + 0xc + 0x10*index);
4092 }
4093
a2fbb9ea
ET
4094 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4095 HC_REG_ATTN_MSG0_ADDR_L);
4096
4097 REG_WR(bp, reg_offset, U64_LO(section));
4098 REG_WR(bp, reg_offset + 4, U64_HI(section));
4099
4100 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4101
4102 val = REG_RD(bp, reg_offset);
34f80b04 4103 val |= sb_id;
a2fbb9ea
ET
4104 REG_WR(bp, reg_offset, val);
4105
4106 /* USTORM */
4107 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4108 u_def_status_block);
34f80b04 4109 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4110
4111 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4112 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4113 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4114 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4115 U64_HI(section));
5c862848 4116 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4117 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4118
4119 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4120 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4121 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4122
4123 /* CSTORM */
4124 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4125 c_def_status_block);
34f80b04 4126 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4127
4128 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4129 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4130 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4131 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4132 U64_HI(section));
5c862848 4133 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4134 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4135
4136 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4137 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4138 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4139
4140 /* TSTORM */
4141 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142 t_def_status_block);
34f80b04 4143 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4144
4145 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4146 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4147 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4148 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4149 U64_HI(section));
5c862848 4150 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4151 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4152
4153 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4154 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4155 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4156
4157 /* XSTORM */
4158 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4159 x_def_status_block);
34f80b04 4160 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4161
4162 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4163 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4164 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4165 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4166 U64_HI(section));
5c862848 4167 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4168 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4169
4170 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4171 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4172 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4173
bb2a0f7a 4174 bp->stats_pending = 0;
66e855f3 4175 bp->set_mac_pending = 0;
bb2a0f7a 4176
34f80b04 4177 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4178}
4179
4180static void bnx2x_update_coalesce(struct bnx2x *bp)
4181{
34f80b04 4182 int port = BP_PORT(bp);
a2fbb9ea
ET
4183 int i;
4184
4185 for_each_queue(bp, i) {
34f80b04 4186 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4187
4188 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4189 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4190 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4191 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4192 bp->rx_ticks/12);
a2fbb9ea 4193 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4194 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4195 U_SB_ETH_RX_CQ_INDEX),
4196 bp->rx_ticks ? 0 : 1);
4197 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199 U_SB_ETH_RX_BD_INDEX),
34f80b04 4200 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4201
4202 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4203 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4204 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4205 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4206 bp->tx_ticks/12);
a2fbb9ea 4207 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4208 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4209 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4210 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4211 }
4212}
4213
7a9b2557
VZ
4214static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4215 struct bnx2x_fastpath *fp, int last)
4216{
4217 int i;
4218
4219 for (i = 0; i < last; i++) {
4220 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4221 struct sk_buff *skb = rx_buf->skb;
4222
4223 if (skb == NULL) {
4224 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4225 continue;
4226 }
4227
4228 if (fp->tpa_state[i] == BNX2X_TPA_START)
4229 pci_unmap_single(bp->pdev,
4230 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4231 bp->rx_buf_size,
7a9b2557
VZ
4232 PCI_DMA_FROMDEVICE);
4233
4234 dev_kfree_skb(skb);
4235 rx_buf->skb = NULL;
4236 }
4237}
4238
a2fbb9ea
ET
4239static void bnx2x_init_rx_rings(struct bnx2x *bp)
4240{
7a9b2557 4241 int func = BP_FUNC(bp);
32626230
EG
4242 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243 ETH_MAX_AGGREGATION_QUEUES_E1H;
4244 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4245 int i, j;
a2fbb9ea 4246
437cf2f1
EG
4247 bp->rx_buf_size = bp->dev->mtu;
4248 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4249 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4250
7a9b2557
VZ
4251 if (bp->flags & TPA_ENABLE_FLAG) {
4252 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4253 "rx_buf_size %d effective_mtu %d\n",
4254 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4255
4256 for_each_queue(bp, j) {
32626230 4257 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4258
32626230 4259 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4260 fp->tpa_pool[i].skb =
4261 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4262 if (!fp->tpa_pool[i].skb) {
4263 BNX2X_ERR("Failed to allocate TPA "
4264 "skb pool for queue[%d] - "
4265 "disabling TPA on this "
4266 "queue!\n", j);
4267 bnx2x_free_tpa_pool(bp, fp, i);
4268 fp->disable_tpa = 1;
4269 break;
4270 }
4271 pci_unmap_addr_set((struct sw_rx_bd *)
4272 &fp->tpa_pool[i],
4273 mapping, 0);
4274 fp->tpa_state[i] = BNX2X_TPA_STOP;
4275 }
4276 }
4277 }
4278
a2fbb9ea
ET
4279 for_each_queue(bp, j) {
4280 struct bnx2x_fastpath *fp = &bp->fp[j];
4281
4282 fp->rx_bd_cons = 0;
4283 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4284 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4285
4286 /* "next page" elements initialization */
4287 /* SGE ring */
4288 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4289 struct eth_rx_sge *sge;
4290
4291 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4292 sge->addr_hi =
4293 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4294 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4295 sge->addr_lo =
4296 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4297 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4298 }
4299
4300 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4301
7a9b2557 4302 /* RX BD ring */
a2fbb9ea
ET
4303 for (i = 1; i <= NUM_RX_RINGS; i++) {
4304 struct eth_rx_bd *rx_bd;
4305
4306 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4307 rx_bd->addr_hi =
4308 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4309 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4310 rx_bd->addr_lo =
4311 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4312 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4313 }
4314
34f80b04 4315 /* CQ ring */
a2fbb9ea
ET
4316 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4317 struct eth_rx_cqe_next_page *nextpg;
4318
4319 nextpg = (struct eth_rx_cqe_next_page *)
4320 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4321 nextpg->addr_hi =
4322 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4323 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4324 nextpg->addr_lo =
4325 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4326 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4327 }
4328
7a9b2557
VZ
4329 /* Allocate SGEs and initialize the ring elements */
4330 for (i = 0, ring_prod = 0;
4331 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4332
7a9b2557
VZ
4333 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4334 BNX2X_ERR("was only able to allocate "
4335 "%d rx sges\n", i);
4336 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4337 /* Cleanup already allocated elements */
4338 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4339 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4340 fp->disable_tpa = 1;
4341 ring_prod = 0;
4342 break;
4343 }
4344 ring_prod = NEXT_SGE_IDX(ring_prod);
4345 }
4346 fp->rx_sge_prod = ring_prod;
4347
4348 /* Allocate BDs and initialize BD ring */
66e855f3 4349 fp->rx_comp_cons = 0;
7a9b2557 4350 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4351 for (i = 0; i < bp->rx_ring_size; i++) {
4352 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4353 BNX2X_ERR("was only able to allocate "
4354 "%d rx skbs\n", i);
66e855f3 4355 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4356 break;
4357 }
4358 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4359 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4360 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4361 }
4362
7a9b2557
VZ
4363 fp->rx_bd_prod = ring_prod;
4364 /* must not have more available CQEs than BDs */
4365 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4366 cqe_ring_prod);
a2fbb9ea
ET
4367 fp->rx_pkt = fp->rx_calls = 0;
4368
7a9b2557
VZ
4369 /* Warning!
4370 * this will generate an interrupt (to the TSTORM);
4371 * it must only be done after the chip is initialized
4372 */
4373 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4374 fp->rx_sge_prod);
a2fbb9ea
ET
4375 if (j != 0)
4376 continue;
4377
4378 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4379 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4380 U64_LO(fp->rx_comp_mapping));
4381 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4382 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4383 U64_HI(fp->rx_comp_mapping));
4384 }
4385}
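
/*
 * Editor's sketch: the ring-init loops above chain N physical pages
 * into one logical ring by writing, into the tail of each page, the
 * DMA address of page (i % N) -- the last page points back at the
 * first.  Hypothetical standalone model:
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES	4
#define RING_PAGE_SIZE	4096

int main(void)
{
	uint64_t base = 0x10000000ull;		/* pretend DMA address */
	int i;

	for (i = 1; i <= NUM_PAGES; i++) {
		uint64_t next = base + (uint64_t)RING_PAGE_SIZE *
						(i % NUM_PAGES);

		/* this pointer lives in the tail descriptor of page i-1 */
		printf("page %d tail -> %#llx\n", i - 1,
		       (unsigned long long)next);
	}
	return 0;
}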
4386
4387static void bnx2x_init_tx_ring(struct bnx2x *bp)
4388{
4389 int i, j;
4390
4391 for_each_queue(bp, j) {
4392 struct bnx2x_fastpath *fp = &bp->fp[j];
4393
4394 for (i = 1; i <= NUM_TX_RINGS; i++) {
4395 struct eth_tx_bd *tx_bd =
4396 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4397
4398 tx_bd->addr_hi =
4399 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4400 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4401 tx_bd->addr_lo =
4402 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4403 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4404 }
4405
4406 fp->tx_pkt_prod = 0;
4407 fp->tx_pkt_cons = 0;
4408 fp->tx_bd_prod = 0;
4409 fp->tx_bd_cons = 0;
4410 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4411 fp->tx_pkt = 0;
4412 }
4413}
4414
4415static void bnx2x_init_sp_ring(struct bnx2x *bp)
4416{
34f80b04 4417 int func = BP_FUNC(bp);
a2fbb9ea
ET
4418
4419 spin_lock_init(&bp->spq_lock);
4420
4421 bp->spq_left = MAX_SPQ_PENDING;
4422 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4423 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4424 bp->spq_prod_bd = bp->spq;
4425 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4426
34f80b04 4427 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4428 U64_LO(bp->spq_mapping));
34f80b04
EG
4429 REG_WR(bp,
4430 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4431 U64_HI(bp->spq_mapping));
4432
34f80b04 4433 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4434 bp->spq_prod_idx);
4435}
4436
4437static void bnx2x_init_context(struct bnx2x *bp)
4438{
4439 int i;
4440
4441 for_each_queue(bp, i) {
4442 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4443 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4444 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4445
4446 context->xstorm_st_context.tx_bd_page_base_hi =
4447 U64_HI(fp->tx_desc_mapping);
4448 context->xstorm_st_context.tx_bd_page_base_lo =
4449 U64_LO(fp->tx_desc_mapping);
4450 context->xstorm_st_context.db_data_addr_hi =
4451 U64_HI(fp->tx_prods_mapping);
4452 context->xstorm_st_context.db_data_addr_lo =
4453 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4454 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4455 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4456
4457 context->ustorm_st_context.common.sb_index_numbers =
4458 BNX2X_RX_SB_INDEX_NUM;
4459 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4460 context->ustorm_st_context.common.status_block_id = sb_id;
4461 context->ustorm_st_context.common.flags =
4462 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4463 context->ustorm_st_context.common.mc_alignment_size =
4464 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4465 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4466 bp->rx_buf_size;
34f80b04 4467 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4468 U64_HI(fp->rx_desc_mapping);
34f80b04 4469 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4470 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4471 if (!fp->disable_tpa) {
4472 context->ustorm_st_context.common.flags |=
4473 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475 context->ustorm_st_context.common.sge_buff_size =
4476 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477 context->ustorm_st_context.common.sge_page_base_hi =
4478 U64_HI(fp->rx_sge_mapping);
4479 context->ustorm_st_context.common.sge_page_base_lo =
4480 U64_LO(fp->rx_sge_mapping);
4481 }
4482
a2fbb9ea 4483 context->cstorm_st_context.sb_index_number =
5c862848 4484 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4485 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4486
4487 context->xstorm_ag_context.cdu_reserved =
4488 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489 CDU_REGION_NUMBER_XCM_AG,
4490 ETH_CONNECTION_TYPE);
4491 context->ustorm_ag_context.cdu_usage =
4492 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493 CDU_REGION_NUMBER_UCM_AG,
4494 ETH_CONNECTION_TYPE);
4495 }
4496}
4497
4498static void bnx2x_init_ind_table(struct bnx2x *bp)
4499{
34f80b04 4500 int port = BP_PORT(bp);
a2fbb9ea
ET
4501 int i;
4502
4503 if (!is_multi(bp))
4504 return;
4505
34f80b04 4506 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4507 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4508 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4510 i % bp->num_queues);
4511
4512 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513}
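
/*
 * Editor's sketch: bnx2x_init_ind_table() spreads the RSS hash buckets
 * over the active queues round-robin (bucket i -> queue i % nqueues).
 * Standalone model; the table size here is an assumption:
 */
#include <stdio.h>

#define IND_TABLE_SIZE 128	/* assumed; see TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
	unsigned char table[IND_TABLE_SIZE];
	int num_queues = 4, i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		table[i] = (unsigned char)(i % num_queues);

	printf("bucket 0 -> q%d, bucket 5 -> q%d\n", table[0], table[5]);
	return 0;
}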
4514
49d66772
ET
4515static void bnx2x_set_client_config(struct bnx2x *bp)
4516{
49d66772 4517 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4518 int port = BP_PORT(bp);
4519 int i;
49d66772 4520
34f80b04 4521 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4522 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4523 tstorm_client.config_flags =
4524 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525#ifdef BCM_VLAN
34f80b04 4526 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4527 tstorm_client.config_flags |=
4528 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530 }
4531#endif
49d66772 4532
7a9b2557
VZ
4533 if (bp->flags & TPA_ENABLE_FLAG) {
4534 tstorm_client.max_sges_for_packet =
4535 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536 tstorm_client.max_sges_for_packet =
4537 ((tstorm_client.max_sges_for_packet +
4538 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539 PAGES_PER_SGE_SHIFT;
4540
4541 tstorm_client.config_flags |=
4542 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543 }
4544
49d66772
ET
4545 for_each_queue(bp, i) {
4546 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4547 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4548 ((u32 *)&tstorm_client)[0]);
4549 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4550 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4551 ((u32 *)&tstorm_client)[1]);
4552 }
4553
34f80b04
EG
4554 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4556}
4557
a2fbb9ea
ET
4558static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559{
a2fbb9ea 4560 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4561 int mode = bp->rx_mode;
4562 int mask = (1 << BP_L_ID(bp));
4563 int func = BP_FUNC(bp);
a2fbb9ea
ET
4564 int i;
4565
3196a88a 4566 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4567
4568 switch (mode) {
4569 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4570 tstorm_mac_filter.ucast_drop_all = mask;
4571 tstorm_mac_filter.mcast_drop_all = mask;
4572 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4573 break;
4574 case BNX2X_RX_MODE_NORMAL:
34f80b04 4575 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4576 break;
4577 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4578 tstorm_mac_filter.mcast_accept_all = mask;
4579 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4580 break;
4581 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4582 tstorm_mac_filter.ucast_accept_all = mask;
4583 tstorm_mac_filter.mcast_accept_all = mask;
4584 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4585 break;
4586 default:
34f80b04
EG
4587 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588 break;
a2fbb9ea
ET
4589 }
4590
4591 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4593 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4594 ((u32 *)&tstorm_mac_filter)[i]);
4595
34f80b04 4596/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4597 ((u32 *)&tstorm_mac_filter)[i]); */
4598 }
a2fbb9ea 4599
49d66772
ET
4600 if (mode != BNX2X_RX_MODE_NONE)
4601 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4602}
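
/*
 * Editor's sketch: the switch above maps the netdev rx mode onto
 * per-traffic-class accept masks, one bit per function.  The modes are
 * strict supersets of each other, which a fall-through cascade makes
 * explicit (drop-all bits omitted for brevity; hypothetical names):
 */
#include <stdint.h>

struct mac_filter { uint8_t ucast_accept, mcast_accept, bcast_accept; };

enum rx_mode { RX_NONE, RX_NORMAL, RX_ALLMULTI, RX_PROMISC };

static void set_rx_mode(struct mac_filter *f, enum rx_mode mode, int func_id)
{
	uint8_t mask = (uint8_t)(1 << func_id);

	f->ucast_accept = f->mcast_accept = f->bcast_accept = 0;
	switch (mode) {
	case RX_PROMISC:  f->ucast_accept |= mask;	/* fall through */
	case RX_ALLMULTI: f->mcast_accept |= mask;	/* fall through */
	case RX_NORMAL:   f->bcast_accept |= mask;	/* fall through */
	case RX_NONE:	  break;
	}
}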
4603
471de716
EG
4604static void bnx2x_init_internal_common(struct bnx2x *bp)
4605{
4606 int i;
4607
3cdf1db7
YG
4608 if (bp->flags & TPA_ENABLE_FLAG) {
4609 struct tstorm_eth_tpa_exist tpa = {0};
4610
4611 tpa.tpa_exist = 1;
4612
4613 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4614 ((u32 *)&tpa)[0]);
4615 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4616 ((u32 *)&tpa)[1]);
4617 }
4618
471de716
EG
4619 /* Zero this manually as its initialization is
4620 currently missing in the initTool */
4621 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4622 REG_WR(bp, BAR_USTRORM_INTMEM +
4623 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4624}
4625
4626static void bnx2x_init_internal_port(struct bnx2x *bp)
4627{
4628 int port = BP_PORT(bp);
4629
4630 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4631 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634}
4635
4636static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4637{
a2fbb9ea
ET
4638 struct tstorm_eth_function_common_config tstorm_config = {0};
4639 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4640 int port = BP_PORT(bp);
4641 int func = BP_FUNC(bp);
4642 int i;
471de716 4643 u16 max_agg_size;
a2fbb9ea
ET
4644
4645 if (is_multi(bp)) {
4646 tstorm_config.config_flags = MULTI_FLAGS;
4647 tstorm_config.rss_result_mask = MULTI_MASK;
4648 }
4649
34f80b04
EG
4650 tstorm_config.leading_client_id = BP_L_ID(bp);
4651
a2fbb9ea 4652 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4653 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4654 (*(u32 *)&tstorm_config));
4655
c14423fe 4656 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4657 bnx2x_set_storm_rx_mode(bp);
4658
66e855f3
YG
4659 /* reset xstorm per client statistics */
4660 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4661 REG_WR(bp, BAR_XSTRORM_INTMEM +
4662 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4663 i*4, 0);
4664 }
4665 /* reset tstorm per client statistics */
4666 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4667 REG_WR(bp, BAR_TSTRORM_INTMEM +
4668 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4669 i*4, 0);
4670 }
4671
4672 /* Init statistics related context */
34f80b04 4673 stats_flags.collect_eth = 1;
a2fbb9ea 4674
66e855f3 4675 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4676 ((u32 *)&stats_flags)[0]);
66e855f3 4677 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4678 ((u32 *)&stats_flags)[1]);
4679
66e855f3 4680 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4681 ((u32 *)&stats_flags)[0]);
66e855f3 4682 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4683 ((u32 *)&stats_flags)[1]);
4684
66e855f3 4685 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4686 ((u32 *)&stats_flags)[0]);
66e855f3 4687 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4688 ((u32 *)&stats_flags)[1]);
4689
66e855f3
YG
4690 REG_WR(bp, BAR_XSTRORM_INTMEM +
4691 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4692 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4693 REG_WR(bp, BAR_XSTRORM_INTMEM +
4694 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4695 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4696
4697 REG_WR(bp, BAR_TSTRORM_INTMEM +
4698 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4699 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4700 REG_WR(bp, BAR_TSTRORM_INTMEM +
4701 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4702 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4703
4704 if (CHIP_IS_E1H(bp)) {
4705 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4706 IS_E1HMF(bp));
4707 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4708 IS_E1HMF(bp));
4709 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4710 IS_E1HMF(bp));
4711 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4712 IS_E1HMF(bp));
4713
7a9b2557
VZ
4714 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4715 bp->e1hov);
34f80b04
EG
4716 }
4717
471de716 4718 /* Init CQ ring mapping and aggregation size */
437cf2f1 4719 max_agg_size = min((u32)(bp->rx_buf_size +
471de716
EG
4720 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4721 (u32)0xffff);
7a9b2557
VZ
4722 for_each_queue(bp, i) {
4723 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4724
4725 REG_WR(bp, BAR_USTRORM_INTMEM +
4726 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4727 U64_LO(fp->rx_comp_mapping));
4728 REG_WR(bp, BAR_USTRORM_INTMEM +
4729 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4730 U64_HI(fp->rx_comp_mapping));
4731
7a9b2557
VZ
4732 REG_WR16(bp, BAR_USTRORM_INTMEM +
4733 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4734 max_agg_size);
4735 }
a2fbb9ea
ET
4736}
4737
471de716
EG
4738static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4739{
4740 switch (load_code) {
4741 case FW_MSG_CODE_DRV_LOAD_COMMON:
4742 bnx2x_init_internal_common(bp);
4743 /* no break */
4744
4745 case FW_MSG_CODE_DRV_LOAD_PORT:
4746 bnx2x_init_internal_port(bp);
4747 /* no break */
4748
4749 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4750 bnx2x_init_internal_func(bp);
4751 break;
4752
4753 default:
4754 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4755 break;
4756 }
4757}
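
/*
 * Editor's sketch: bnx2x_init_internal() relies on deliberate switch
 * fall-through, so a COMMON load runs all three stages, a PORT load
 * runs PORT + FUNCTION, and a FUNCTION load runs only its own stage:
 */
#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_internal(enum load_code code)
{
	switch (code) {
	case LOAD_COMMON:
		puts("common init");
		/* no break -- fall through */
	case LOAD_PORT:
		puts("port init");
		/* no break -- fall through */
	case LOAD_FUNCTION:
		puts("function init");
		break;
	}
}

int main(void)
{
	init_internal(LOAD_PORT);  /* prints "port init" then "function init" */
	return 0;
}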
4758
4759static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4760{
4761 int i;
4762
4763 for_each_queue(bp, i) {
4764 struct bnx2x_fastpath *fp = &bp->fp[i];
4765
34f80b04 4766 fp->bp = bp;
a2fbb9ea 4767 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4768 fp->index = i;
34f80b04
EG
4769 fp->cl_id = BP_L_ID(bp) + i;
4770 fp->sb_id = fp->cl_id;
4771 DP(NETIF_MSG_IFUP,
4772 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4773 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4774 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4775 FP_SB_ID(fp));
4776 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4777 }
4778
5c862848
EG
4779 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4780 DEF_SB_ID);
4781 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4782 bnx2x_update_coalesce(bp);
4783 bnx2x_init_rx_rings(bp);
4784 bnx2x_init_tx_ring(bp);
4785 bnx2x_init_sp_ring(bp);
4786 bnx2x_init_context(bp);
471de716 4787 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4788 bnx2x_init_ind_table(bp);
615f8fd9 4789 bnx2x_int_enable(bp);
a2fbb9ea
ET
4790}
4791
4792/* end of nic init */
4793
4794/*
4795 * gzip service functions
4796 */
4797
4798static int bnx2x_gunzip_init(struct bnx2x *bp)
4799{
4800 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4801 &bp->gunzip_mapping);
4802 if (bp->gunzip_buf == NULL)
4803 goto gunzip_nomem1;
4804
4805 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4806 if (bp->strm == NULL)
4807 goto gunzip_nomem2;
4808
4809 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4810 GFP_KERNEL);
4811 if (bp->strm->workspace == NULL)
4812 goto gunzip_nomem3;
4813
4814 return 0;
4815
4816gunzip_nomem3:
4817 kfree(bp->strm);
4818 bp->strm = NULL;
4819
4820gunzip_nomem2:
4821 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822 bp->gunzip_mapping);
4823 bp->gunzip_buf = NULL;
4824
4825gunzip_nomem1:
4826 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4827 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4828 return -ENOMEM;
4829}
4830
4831static void bnx2x_gunzip_end(struct bnx2x *bp)
4832{
4833 kfree(bp->strm->workspace);
4834
4835 kfree(bp->strm);
4836 bp->strm = NULL;
4837
4838 if (bp->gunzip_buf) {
4839 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4840 bp->gunzip_mapping);
4841 bp->gunzip_buf = NULL;
4842 }
4843}
4844
4845static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4846{
4847 int n, rc;
4848
4849 /* check gzip header */
4850 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4851 return -EINVAL;
4852
4853 n = 10;
4854
34f80b04 4855#define FNAME 0x8
a2fbb9ea
ET
4856
4857 if (zbuf[3] & FNAME)
4858 while ((zbuf[n++] != 0) && (n < len));
4859
4860 bp->strm->next_in = zbuf + n;
4861 bp->strm->avail_in = len - n;
4862 bp->strm->next_out = bp->gunzip_buf;
4863 bp->strm->avail_out = FW_BUF_SIZE;
4864
4865 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4866 if (rc != Z_OK)
4867 return rc;
4868
4869 rc = zlib_inflate(bp->strm, Z_FINISH);
4870 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4871 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4872 bp->dev->name, bp->strm->msg);
4873
4874 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4875 if (bp->gunzip_outlen & 0x3)
4876 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4877 " gunzip_outlen (%d) not aligned\n",
4878 bp->dev->name, bp->gunzip_outlen);
4879 bp->gunzip_outlen >>= 2;
4880
4881 zlib_inflateEnd(bp->strm);
4882
4883 if (rc == Z_STREAM_END)
4884 return 0;
4885
4886 return rc;
4887}
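
/*
 * Editor's sketch: bnx2x_gunzip() skips the 10-byte gzip header (plus a
 * NUL-terminated original-file-name field when the FNAME flag, bit 3 of
 * the flags byte, is set) and then inflates the raw deflate stream with
 * a negative window-bits value, which tells zlib not to expect any
 * zlib/gzip wrapper.  Userspace analogue (link with -lz):
 */
#include <string.h>
#include <zlib.h>

static int gunzip_raw(unsigned char *zbuf, unsigned len,
		      unsigned char *out, unsigned out_len)
{
	z_stream strm;
	unsigned n = 10;			/* fixed gzip header */
	int rc;

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return Z_DATA_ERROR;
	if (zbuf[3] & 0x08)			/* FNAME: skip the name */
		while (n < len && zbuf[n++] != 0)
			;

	memset(&strm, 0, sizeof(strm));		/* default allocators */
	strm.next_in = zbuf + n;
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	rc = inflateInit2(&strm, -MAX_WBITS);	/* raw deflate, no wrapper */
	if (rc != Z_OK)
		return rc;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);
	return (rc == Z_STREAM_END) ? Z_OK : rc;
}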
4888
4889/* nic load/unload */
4890
4891/*
34f80b04 4892 * General service functions
a2fbb9ea
ET
4893 */
4894
4895/* send a NIG loopback debug packet */
4896static void bnx2x_lb_pckt(struct bnx2x *bp)
4897{
a2fbb9ea 4898 u32 wb_write[3];
a2fbb9ea
ET
4899
4900 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4901 wb_write[0] = 0x55555555;
4902 wb_write[1] = 0x55555555;
34f80b04 4903 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4904 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4905
4906 /* NON-IP protocol */
a2fbb9ea
ET
4907 wb_write[0] = 0x09000000;
4908 wb_write[1] = 0x55555555;
34f80b04 4909 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4910 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4911}
4912
4913/* some of the internal memories
4914 * are not directly readable from the driver;
4915 * to test them we send debug packets
4916 */
4917static int bnx2x_int_mem_test(struct bnx2x *bp)
4918{
4919 int factor;
4920 int count, i;
4921 u32 val = 0;
4922
ad8d3948 4923 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4924 factor = 120;
ad8d3948
EG
4925 else if (CHIP_REV_IS_EMUL(bp))
4926 factor = 200;
4927 else
a2fbb9ea 4928 factor = 1;
a2fbb9ea
ET
4929
4930 DP(NETIF_MSG_HW, "start part1\n");
4931
4932 /* Disable inputs of parser neighbor blocks */
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4936 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4937
4938 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4940
4941 /* send Ethernet packet */
4942 bnx2x_lb_pckt(bp);
4943
4944 /* TODO: do I reset the NIG statistics? */
4945 /* Wait until NIG register shows 1 packet of size 0x10 */
4946 count = 1000 * factor;
4947 while (count) {
34f80b04 4948
a2fbb9ea
ET
4949 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4950 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4951 if (val == 0x10)
4952 break;
4953
4954 msleep(10);
4955 count--;
4956 }
4957 if (val != 0x10) {
4958 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4959 return -1;
4960 }
4961
4962 /* Wait until PRS register shows 1 packet */
4963 count = 1000 * factor;
4964 while (count) {
4965 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4966 if (val == 1)
4967 break;
4968
4969 msleep(10);
4970 count--;
4971 }
4972 if (val != 0x1) {
4973 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4974 return -2;
4975 }
4976
4977 /* Reset and init BRB, PRS */
34f80b04 4978 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4979 msleep(50);
34f80b04 4980 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4981 msleep(50);
4982 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4983 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4984
4985 DP(NETIF_MSG_HW, "part2\n");
4986
4987 /* Disable inputs of parser neighbor blocks */
4988 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4989 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4990 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4991 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4992
4993 /* Write 0 to parser credits for CFC search request */
4994 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4995
4996 /* send 10 Ethernet packets */
4997 for (i = 0; i < 10; i++)
4998 bnx2x_lb_pckt(bp);
4999
5000 /* Wait until NIG register shows 10 + 1
5001 packets of size 11*0x10 = 0xb0 */
5002 count = 1000 * factor;
5003 while (count) {
34f80b04 5004
a2fbb9ea
ET
5005 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5006 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5007 if (val == 0xb0)
5008 break;
5009
5010 msleep(10);
5011 count--;
5012 }
5013 if (val != 0xb0) {
5014 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5015 return -3;
5016 }
5017
5018 /* Wait until PRS register shows 2 packets */
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020 if (val != 2)
5021 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5022
5023 /* Write 1 to parser credits for CFC search request */
5024 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5025
5026 /* Wait until PRS register shows 3 packets */
5027 msleep(10 * factor);
5028 /* then re-read the PRS packet counter */
5029 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5030 if (val != 3)
5031 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5032
5033 /* clear NIG EOP FIFO */
5034 for (i = 0; i < 11; i++)
5035 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5036 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5037 if (val != 1) {
5038 BNX2X_ERR("clear of NIG failed\n");
5039 return -4;
5040 }
5041
5042 /* Reset and init BRB, PRS, NIG */
5043 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5044 msleep(50);
5045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5046 msleep(50);
5047 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5048 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5049#ifndef BCM_ISCSI
5050 /* set NIC mode */
5051 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5052#endif
5053
5054 /* Enable inputs of parser neighbor blocks */
5055 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5056 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5057 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5058 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5059
5060 DP(NETIF_MSG_HW, "done\n");
5061
5062 return 0; /* OK */
5063}
5064
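/* A note on the writes below (not in the original source): writing 0 to a
 * block's *_INT_MASK register unmasks all of that block's attention
 * sources; the non-zero values written for PXP2 and PBF deliberately keep
 * some bits masked.
 */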
5065static void enable_blocks_attention(struct bnx2x *bp)
5066{
5067 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5068 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5069 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5070 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5071 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5072 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5073 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5074 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5075 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5076/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5077/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5078 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5079 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5080 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5081/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5082/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5083 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5084 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5085 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5086 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5087/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5088/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5089 if (CHIP_REV_IS_FPGA(bp))
5090 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5091 else
5092 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5093 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5094 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5095 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5096/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5097/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5098 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5099 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5100/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5101 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5102}
5103
5104
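/* HW init is staged: bnx2x_init_common() runs once per chip, by whichever
 * function the MCP answers with FW_MSG_CODE_DRV_LOAD_COMMON;
 * bnx2x_init_port() and bnx2x_init_func() then run per port and per PCI
 * function (see the load_code switch in bnx2x_init_hw() below).
 */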
5105static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5106{
a2fbb9ea 5107 u32 val, i;
a2fbb9ea 5108
34f80b04 5109 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5110
5111 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5112 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5113
5114 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5115 if (CHIP_IS_E1H(bp))
5116 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5117
5118 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5119 msleep(30);
5120 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5121
5122 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5123 if (CHIP_IS_E1(bp)) {
5124 /* enable HW interrupt from PXP on USDM overflow
5125 bit 16 on INT_MASK_0 */
5126 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5127 }
a2fbb9ea 5128
5129 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5130 bnx2x_init_pxp(bp);
5131
5132#ifdef __BIG_ENDIAN
5133 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5134 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5138 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5139
5140/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5141 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5142 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5144 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5145#endif
5146
34f80b04 5147 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5148#ifdef BCM_ISCSI
5149 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5150 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5151 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5152#endif
5153
5154 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5155 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5156
5157 /* let the HW do its magic ... */
5158 msleep(100);
5159 /* finish PXP init */
5160 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5161 if (val != 1) {
5162 BNX2X_ERR("PXP2 CFG failed\n");
5163 return -EBUSY;
5164 }
5165 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5166 if (val != 1) {
5167 BNX2X_ERR("PXP2 RD_INIT failed\n");
5168 return -EBUSY;
5169 }
a2fbb9ea 5170
5171 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5172 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5173
34f80b04 5174 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5175
5176 /* clean the DMAE memory */
5177 bp->dmae_ready = 1;
5178 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5179
5180 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5181 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5182 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5183 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5184
5185 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5186 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5187 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5189
5190 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5191 /* soft reset pulse */
5192 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5193 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5194
5195#ifdef BCM_ISCSI
34f80b04 5196 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5197#endif
a2fbb9ea 5198
5199 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5200 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5201 if (!CHIP_REV_IS_SLOW(bp)) {
5202 /* enable hw interrupt from doorbell Q */
5203 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5204 }
a2fbb9ea 5205
5206 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5207 if (CHIP_REV_IS_SLOW(bp)) {
5208 /* fix for emulation and FPGA for no pause */
5209 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5210 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5211 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5212 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5213 }
a2fbb9ea 5214
34f80b04 5215 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5216 /* set NIC mode */
5217 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5218 if (CHIP_IS_E1H(bp))
5219 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5220
5221 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5222 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5223 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5224 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5225
5226 if (CHIP_IS_E1H(bp)) {
5227 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5228 STORM_INTMEM_SIZE_E1H/2);
5229 bnx2x_init_fill(bp,
5230 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231 0, STORM_INTMEM_SIZE_E1H/2);
5232 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5233 STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp,
5235 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236 0, STORM_INTMEM_SIZE_E1H/2);
5237 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238 STORM_INTMEM_SIZE_E1H/2);
5239 bnx2x_init_fill(bp,
5240 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5241 0, STORM_INTMEM_SIZE_E1H/2);
5242 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp,
5245 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5246 0, STORM_INTMEM_SIZE_E1H/2);
5247 } else { /* E1 */
5248 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5249 STORM_INTMEM_SIZE_E1);
5250 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5253 STORM_INTMEM_SIZE_E1);
5254 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1);
34f80b04 5256 }
a2fbb9ea 5257
5258 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5259 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5260 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5261 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5262
5263 /* sync semi rtc */
5264 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5265 0x80000000);
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5267 0x80000000);
a2fbb9ea 5268
5269 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5270 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5271 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5272
5273 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5274 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5275 REG_WR(bp, i, 0xc0cac01a);
5276 /* TODO: replace with something meaningful */
5277 }
5278 if (CHIP_IS_E1H(bp))
5279 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5280 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5281
5282 if (sizeof(union cdu_context) != 1024)
5283 /* we currently assume that a context is 1024 bytes */
5284 printk(KERN_ALERT PFX "please adjust the size of"
5285 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5286
5287 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5288 val = (4 << 24) + (0 << 12) + 1024;
5289 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5290 if (CHIP_IS_E1(bp)) {
5291 /* !!! fix pxp client credit until excel update */
5292 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5293 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5294 }
a2fbb9ea 5295
5296 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5297 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5298
5299 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5300 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5301
5302 /* PXPCS COMMON comes here */
5303 /* Reset PCIE errors for debug */
5304 REG_WR(bp, 0x2814, 0xffffffff);
5305 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5306
5307 /* EMAC0 COMMON comes here */
5308 /* EMAC1 COMMON comes here */
5309 /* DBU COMMON comes here */
5310 /* DBG COMMON comes here */
5311
5312 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5313 if (CHIP_IS_E1H(bp)) {
5314 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5315 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5316 }
5317
5318 if (CHIP_REV_IS_SLOW(bp))
5319 msleep(200);
5320
5321 /* finish CFC init */
5322 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5323 if (val != 1) {
5324 BNX2X_ERR("CFC LL_INIT failed\n");
5325 return -EBUSY;
5326 }
5327 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5328 if (val != 1) {
5329 BNX2X_ERR("CFC AC_INIT failed\n");
5330 return -EBUSY;
5331 }
5332 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5333 if (val != 1) {
5334 BNX2X_ERR("CFC CAM_INIT failed\n");
5335 return -EBUSY;
5336 }
5337 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5338
5339 /* read the NIG statistics
5340 to see if this is our first up since power-up */
5341 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5342 val = *bnx2x_sp(bp, wb_data[0]);
5343
5344 /* do internal memory self test */
5345 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5346 BNX2X_ERR("internal mem self test failed\n");
5347 return -EBUSY;
5348 }
5349
5350 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5351 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5352 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5353 /* Fan failure is indicated by SPIO 5 */
5354 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5355 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5356
5357 /* set to active low mode */
5358 val = REG_RD(bp, MISC_REG_SPIO_INT);
5359 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5360 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5361 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5362
5363 /* enable interrupt to signal the IGU */
5364 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5365 val |= (1 << MISC_REGISTERS_SPIO_5);
5366 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5367 break;
f1410647 5368
5369 default:
5370 break;
5371 }
f1410647 5372
5373 /* clear PXP2 attentions */
5374 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5375
34f80b04 5376 enable_blocks_attention(bp);
a2fbb9ea 5377
5378 if (!BP_NOMCP(bp)) {
5379 bnx2x_acquire_phy_lock(bp);
5380 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381 bnx2x_release_phy_lock(bp);
5382 } else
5383 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5384
5385 return 0;
5386}
a2fbb9ea 5387
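/* Per-port init stage. The "Port XXX comes here" comments below appear to
 * be placeholders for blocks that need no explicit per-port programming in
 * this version of the driver.
 */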
5388static int bnx2x_init_port(struct bnx2x *bp)
5389{
5390 int port = BP_PORT(bp);
5391 u32 val;
a2fbb9ea 5392
5393 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5394
5395 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5396
5397 /* Port PXP comes here */
5398 /* Port PXP2 comes here */
5399#ifdef BCM_ISCSI
5400 /* Port0 1
5401 * Port1 385 */
5402 i++;
5403 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5404 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5405 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5406 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5407
5408 /* Port0 2
5409 * Port1 386 */
5410 i++;
5411 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5412 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5413 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5414 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5415
5416 /* Port0 3
5417 * Port1 387 */
5418 i++;
5419 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5420 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5421 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5422 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5423#endif
34f80b04 5424 /* Port CMs come here */
5425
5426 /* Port QM comes here */
5427#ifdef BCM_ISCSI
5428 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5429 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5430
5431 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5432 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5433#endif
5434 /* Port DQ comes here */
5435 /* Port BRB1 comes here */
ad8d3948 5436 /* Port PRS comes here */
5437 /* Port TSDM comes here */
5438 /* Port CSDM comes here */
5439 /* Port USDM comes here */
5440 /* Port XSDM comes here */
5441 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5442 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5443 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5444 port ? USEM_PORT1_END : USEM_PORT0_END);
5445 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5446 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5447 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5448 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5449 /* Port UPB comes here */
5450 /* Port XPB comes here */
5451
5452 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5453 port ? PBF_PORT1_END : PBF_PORT0_END);
5454
5455 /* configure PBF to work without PAUSE, MTU 9000 */
34f80b04 5456 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5457
5458 /* update threshold */
34f80b04 5459 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5460 /* update init credit */
34f80b04 5461 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5462
5463 /* probe changes */
34f80b04 5464 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5465 msleep(5);
34f80b04 5466 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5467
5468#ifdef BCM_ISCSI
5469 /* tell the searcher where the T2 table is */
5470 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5471
5472 wb_write[0] = U64_LO(bp->t2_mapping);
5473 wb_write[1] = U64_HI(bp->t2_mapping);
5474 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5475 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5476 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5477 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5478
5479 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5480 /* Port SRCH comes here */
5481#endif
5482 /* Port CDU comes here */
5483 /* Port CFC comes here */
5484
5485 if (CHIP_IS_E1(bp)) {
5486 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5487 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5488 }
5489 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5490 port ? HC_PORT1_END : HC_PORT0_END);
5491
5492 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5493 MISC_AEU_PORT0_START,
5494 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5495 /* init aeu_mask_attn_func_0/1:
5496 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5497 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5498 * bits 4-7 are used for "per vn group attention" */
5499 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5500 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5501
5502 /* Port PXPCS comes here */
5503 /* Port EMAC0 comes here */
5504 /* Port EMAC1 comes here */
5505 /* Port DBU comes here */
5506 /* Port DBG comes here */
5507 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5508 port ? NIG_PORT1_END : NIG_PORT0_END);
5509
5510 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5511
5512 if (CHIP_IS_E1H(bp)) {
5513 u32 wsum;
5514 struct cmng_struct_per_port m_cmng_port;
5515 int vn;
5516
5517 /* 0x2 disable e1hov, 0x1 enable */
5518 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5519 (IS_E1HMF(bp) ? 0x1 : 0x2));
5520
5521 /* Init RATE SHAPING and FAIRNESS contexts.
5522 Initialize as if there is a 10G link. */
5523 wsum = bnx2x_calc_vn_wsum(bp);
5524 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5525 if (IS_E1HMF(bp))
5526 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527 bnx2x_init_vn_minmax(bp, 2*vn + port,
5528 wsum, 10000, &m_cmng_port);
5529 }
5530
5531 /* Port MCP comes here */
5532 /* Port DMAE comes here */
5533
34f80b04 5534 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5535 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5537 /* add SPIO 5 to group 0 */
5538 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5539 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5540 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5541 break;
5542
5543 default:
5544 break;
5545 }
5546
c18487ee 5547 bnx2x__link_reset(bp);
a2fbb9ea 5548
5549 return 0;
5550}
5551
5552#define ILT_PER_FUNC (768/2)
5553#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5554/* the phys address is shifted right 12 bits and a valid bit (1)
5555 is set at the 53rd bit;
5556 then, since this is a wide register(TM),
5557 we split it into two 32-bit writes
5558 */
5559#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5560#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5561#define PXP_ONE_ILT(x) (((x) << 10) | x)
5562#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5563
5564#define CNIC_ILT_LINES 0
5565
5566static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5567{
5568 int reg;
5569
5570 if (CHIP_IS_E1H(bp))
5571 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5572 else /* E1 */
5573 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5574
5575 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5576}
5577
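/* Per-function init stage: map this function's window of ILT lines
 * (ILT_PER_FUNC lines starting at FUNC_ILT_BASE(func)) and program the
 * HC and NIG for the function.
 */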
5578static int bnx2x_init_func(struct bnx2x *bp)
5579{
5580 int port = BP_PORT(bp);
5581 int func = BP_FUNC(bp);
5582 int i;
5583
5584 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5585
5586 i = FUNC_ILT_BASE(func);
5587
5588 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5589 if (CHIP_IS_E1H(bp)) {
5590 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5591 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5592 } else /* E1 */
5593 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5594 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5595
5596
5597 if (CHIP_IS_E1H(bp)) {
5598 for (i = 0; i < 9; i++)
5599 bnx2x_init_block(bp,
5600 cm_start[func][i], cm_end[func][i]);
5601
5602 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5603 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5604 }
5605
5606 /* HC init per function */
5607 if (CHIP_IS_E1H(bp)) {
5608 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5609
5610 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5611 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5612 }
5613 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5614
5615 if (CHIP_IS_E1H(bp))
5616 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5617
c14423fe 5618 /* Reset PCIE errors for debug */
5619 REG_WR(bp, 0x2114, 0xffffffff);
5620 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5621
5622 return 0;
5623}
5624
5625static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5626{
5627 int i, rc = 0;
a2fbb9ea 5628
5629 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5630 BP_FUNC(bp), load_code);
a2fbb9ea 5631
5632 bp->dmae_ready = 0;
5633 mutex_init(&bp->dmae_mutex);
5634 bnx2x_gunzip_init(bp);
a2fbb9ea 5635
5636 switch (load_code) {
5637 case FW_MSG_CODE_DRV_LOAD_COMMON:
5638 rc = bnx2x_init_common(bp);
5639 if (rc)
5640 goto init_hw_err;
5641 /* no break */
5642
5643 case FW_MSG_CODE_DRV_LOAD_PORT:
5644 bp->dmae_ready = 1;
5645 rc = bnx2x_init_port(bp);
5646 if (rc)
5647 goto init_hw_err;
5648 /* no break */
5649
5650 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5651 bp->dmae_ready = 1;
5652 rc = bnx2x_init_func(bp);
5653 if (rc)
5654 goto init_hw_err;
5655 break;
5656
5657 default:
5658 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5659 break;
5660 }
5661
5662 if (!BP_NOMCP(bp)) {
5663 int func = BP_FUNC(bp);
5664
5665 bp->fw_drv_pulse_wr_seq =
34f80b04 5666 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5667 DRV_PULSE_SEQ_MASK);
5668 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5669 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5670 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5671 } else
5672 bp->func_stx = 0;
a2fbb9ea 5673
5674 /* this needs to be done before gunzip end */
5675 bnx2x_zero_def_sb(bp);
5676 for_each_queue(bp, i)
5677 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5678
5679init_hw_err:
5680 bnx2x_gunzip_end(bp);
5681
5682 return rc;
5683}
5684
c14423fe 5685/* send the MCP a request, block until there is a reply */
5686static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687{
34f80b04 5688 int func = BP_FUNC(bp);
5689 u32 seq = ++bp->fw_seq;
5690 u32 rc = 0;
5691 u32 cnt = 1;
5692 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5693
34f80b04 5694 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5695 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5696
5697 do {
5698 /* let the FW do its magic ... */
5699 msleep(delay);
a2fbb9ea 5700
19680c48 5701 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5702
5703 /* Give the FW up to 2 seconds (200*10ms) */
5704 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5705
5706 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5707 cnt*delay, rc, seq);
5708
5709 /* is this a reply to our command? */
5710 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5711 rc &= FW_MSG_CODE_MASK;
f1410647 5712
5713 } else {
5714 /* FW BUG! */
5715 BNX2X_ERR("FW failed to respond!\n");
5716 bnx2x_fw_dump(bp);
5717 rc = 0;
5718 }
f1410647 5719
5720 return rc;
5721}
5722
5723static void bnx2x_free_mem(struct bnx2x *bp)
5724{
5725
5726#define BNX2X_PCI_FREE(x, y, size) \
5727 do { \
5728 if (x) { \
5729 pci_free_consistent(bp->pdev, size, x, y); \
5730 x = NULL; \
5731 y = 0; \
5732 } \
5733 } while (0)
5734
5735#define BNX2X_FREE(x) \
5736 do { \
5737 if (x) { \
5738 vfree(x); \
5739 x = NULL; \
5740 } \
5741 } while (0)
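/* The do { ... } while (0) wrappers above make each multi-statement macro
 * expand to a single C statement, so the macros stay safe inside unbraced
 * if/else bodies.
 */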
5742
5743 int i;
5744
5745 /* fastpath */
5746 for_each_queue(bp, i) {
5747
5748 /* Status blocks */
5749 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5750 bnx2x_fp(bp, i, status_blk_mapping),
5751 sizeof(struct host_status_block) +
5752 sizeof(struct eth_tx_db_data));
5753
5754 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5755 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5756 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5757 bnx2x_fp(bp, i, tx_desc_mapping),
5758 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5759
5760 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5761 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5762 bnx2x_fp(bp, i, rx_desc_mapping),
5763 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5764
5765 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5766 bnx2x_fp(bp, i, rx_comp_mapping),
5767 sizeof(struct eth_fast_path_rx_cqe) *
5768 NUM_RCQ_BD);
a2fbb9ea 5769
7a9b2557 5770 /* SGE ring */
32626230 5771 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5772 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5773 bnx2x_fp(bp, i, rx_sge_mapping),
5774 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5775 }
5776 /* end of fastpath */
5777
5778 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5779 sizeof(struct host_def_status_block));
5780
5781 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5782 sizeof(struct bnx2x_slowpath));
5783
5784#ifdef BCM_ISCSI
5785 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5786 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5787 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5788 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5789#endif
7a9b2557 5790 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5791
5792#undef BNX2X_PCI_FREE
5793#undef BNX2X_FREE
5794}
5795
5796static int bnx2x_alloc_mem(struct bnx2x *bp)
5797{
5798
5799#define BNX2X_PCI_ALLOC(x, y, size) \
5800 do { \
5801 x = pci_alloc_consistent(bp->pdev, size, y); \
5802 if (x == NULL) \
5803 goto alloc_mem_err; \
5804 memset(x, 0, size); \
5805 } while (0)
5806
5807#define BNX2X_ALLOC(x, size) \
5808 do { \
5809 x = vmalloc(size); \
5810 if (x == NULL) \
5811 goto alloc_mem_err; \
5812 memset(x, 0, size); \
5813 } while (0)
5814
5815 int i;
5816
5817 /* fastpath */
5818 for_each_queue(bp, i) {
5819 bnx2x_fp(bp, i, bp) = bp;
5820
5821 /* Status blocks */
5822 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5823 &bnx2x_fp(bp, i, status_blk_mapping),
5824 sizeof(struct host_status_block) +
5825 sizeof(struct eth_tx_db_data));
5826
5827 bnx2x_fp(bp, i, hw_tx_prods) =
5828 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5829
5830 bnx2x_fp(bp, i, tx_prods_mapping) =
5831 bnx2x_fp(bp, i, status_blk_mapping) +
5832 sizeof(struct host_status_block);
5833
5834 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5835 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5836 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5837 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5838 &bnx2x_fp(bp, i, tx_desc_mapping),
5839 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5840
5841 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5842 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5843 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5844 &bnx2x_fp(bp, i, rx_desc_mapping),
5845 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5846
5847 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5848 &bnx2x_fp(bp, i, rx_comp_mapping),
5849 sizeof(struct eth_fast_path_rx_cqe) *
5850 NUM_RCQ_BD);
5851
5852 /* SGE ring */
5853 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5854 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5855 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5856 &bnx2x_fp(bp, i, rx_sge_mapping),
5857 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5858 }
5859 /* end of fastpath */
5860
5861 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5862 sizeof(struct host_def_status_block));
5863
5864 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5865 sizeof(struct bnx2x_slowpath));
5866
5867#ifdef BCM_ISCSI
5868 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5869
5870 /* Initialize T1 */
5871 for (i = 0; i < 64*1024; i += 64) {
5872 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5873 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5874 }
5875
5876 /* allocate searcher T2 table
5877 we allocate 1/4 of alloc num for T2
5878 (which is not entered into the ILT) */
5879 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5880
5881 /* Initialize T2 */
5882 for (i = 0; i < 16*1024; i += 64)
5883 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5884
c14423fe 5885 /* now fixup the last line in the block to point to the next block */
5886 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5887
5888 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5889 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5890
5891 /* QM queues (128*MAX_CONN) */
5892 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5893#endif
5894
5895 /* Slow path ring */
5896 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5897
5898 return 0;
5899
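/* A failed allocation can simply delegate to bnx2x_free_mem(): the
 * BNX2X_PCI_FREE/BNX2X_FREE helpers check each pointer for NULL first, so
 * a partially-allocated bp is safe to free.
 */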
5900alloc_mem_err:
5901 bnx2x_free_mem(bp);
5902 return -ENOMEM;
5903
5904#undef BNX2X_PCI_ALLOC
5905#undef BNX2X_ALLOC
5906}
5907
5908static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5909{
5910 int i;
5911
5912 for_each_queue(bp, i) {
5913 struct bnx2x_fastpath *fp = &bp->fp[i];
5914
5915 u16 bd_cons = fp->tx_bd_cons;
5916 u16 sw_prod = fp->tx_pkt_prod;
5917 u16 sw_cons = fp->tx_pkt_cons;
5918
5919 while (sw_cons != sw_prod) {
5920 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5921 sw_cons++;
5922 }
5923 }
5924}
5925
5926static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5927{
5928 int i, j;
5929
5930 for_each_queue(bp, j) {
5931 struct bnx2x_fastpath *fp = &bp->fp[j];
5932
5933 for (i = 0; i < NUM_RX_BD; i++) {
5934 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5935 struct sk_buff *skb = rx_buf->skb;
5936
5937 if (skb == NULL)
5938 continue;
5939
5940 pci_unmap_single(bp->pdev,
5941 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5942 bp->rx_buf_size,
5943 PCI_DMA_FROMDEVICE);
5944
5945 rx_buf->skb = NULL;
5946 dev_kfree_skb(skb);
5947 }
7a9b2557 5948 if (!fp->disable_tpa)
5949 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5950 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5951 ETH_MAX_AGGREGATION_QUEUES_E1H);
5952 }
5953}
5954
5955static void bnx2x_free_skbs(struct bnx2x *bp)
5956{
5957 bnx2x_free_tx_skbs(bp);
5958 bnx2x_free_rx_skbs(bp);
5959}
5960
5961static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5962{
34f80b04 5963 int i, offset = 1;
5964
5965 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5966 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5967 bp->msix_table[0].vector);
5968
5969 for_each_queue(bp, i) {
c14423fe 5970 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5971 "state %x\n", i, bp->msix_table[i + offset].vector,
5972 bnx2x_fp(bp, i, state));
5973
5974 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5975 BNX2X_ERR("IRQ of fp #%d being freed while "
5976 "state != closed\n", i);
a2fbb9ea 5977
34f80b04 5978 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5979 }
5980}
5981
5982static void bnx2x_free_irq(struct bnx2x *bp)
5983{
a2fbb9ea 5984 if (bp->flags & USING_MSIX_FLAG) {
5985 bnx2x_free_msix_irqs(bp);
5986 pci_disable_msix(bp->pdev);
5987 bp->flags &= ~USING_MSIX_FLAG;
5988
5989 } else
5990 free_irq(bp->pdev->irq, bp->dev);
5991}
5992
5993static int bnx2x_enable_msix(struct bnx2x *bp)
5994{
34f80b04 5995 int i, rc, offset;
5996
5997 bp->msix_table[0].entry = 0;
5998 offset = 1;
5999 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6000
6001 for_each_queue(bp, i) {
6002 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6003
6004 bp->msix_table[i + offset].entry = igu_vec;
6005 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6006 "(fastpath #%u)\n", i + offset, igu_vec, i);
6007 }
6008
6009 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6010 bp->num_queues + offset);
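/* pci_enable_msix() returns 0 only when all requested vectors were
 * granted; a positive count (fewer vectors available) or a negative errno
 * both show up here as non-zero, and the caller falls back to INTA. */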
6011 if (rc) {
6012 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6013 return -1;
6014 }
6015 bp->flags |= USING_MSIX_FLAG;
6016
6017 return 0;
6018}
6019
6020static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6021{
34f80b04 6022 int i, rc, offset = 1;
a2fbb9ea 6023
6024 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6025 bp->dev->name, bp->dev);
6026 if (rc) {
6027 BNX2X_ERR("request sp irq failed\n");
6028 return -EBUSY;
6029 }
6030
6031 for_each_queue(bp, i) {
34f80b04 6032 rc = request_irq(bp->msix_table[i + offset].vector,
6033 bnx2x_msix_fp_int, 0,
6034 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6035 if (rc) {
6036 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6037 i + offset, -rc);
6038 bnx2x_free_msix_irqs(bp);
6039 return -EBUSY;
6040 }
6041
6042 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6043 }
6044
6045 return 0;
6046}
6047
6048static int bnx2x_req_irq(struct bnx2x *bp)
6049{
34f80b04 6050 int rc;
a2fbb9ea 6051
6052 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6053 bp->dev->name, bp->dev);
6054 if (!rc)
6055 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6056
6057 return rc;
6058}
6059
6060static void bnx2x_napi_enable(struct bnx2x *bp)
6061{
6062 int i;
6063
6064 for_each_queue(bp, i)
6065 napi_enable(&bnx2x_fp(bp, i, napi));
6066}
6067
6068static void bnx2x_napi_disable(struct bnx2x *bp)
6069{
6070 int i;
6071
6072 for_each_queue(bp, i)
6073 napi_disable(&bnx2x_fp(bp, i, napi));
6074}
6075
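/* netif_start/netif_stop are balanced through bp->intr_sem: only the start
 * call that drops intr_sem back to zero re-enables NAPI and interrupts,
 * pairing with the increment done in bnx2x_int_disable_sync().
 */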
6076static void bnx2x_netif_start(struct bnx2x *bp)
6077{
6078 if (atomic_dec_and_test(&bp->intr_sem)) {
6079 if (netif_running(bp->dev)) {
6080 if (bp->state == BNX2X_STATE_OPEN)
6081 netif_wake_queue(bp->dev);
6082 bnx2x_napi_enable(bp);
6083 bnx2x_int_enable(bp);
6084 }
6085 }
6086}
6087
f8ef6e44 6088static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6089{
f8ef6e44 6090 bnx2x_int_disable_sync(bp, disable_hw);
6091 if (netif_running(bp->dev)) {
6092 bnx2x_napi_disable(bp);
6093 netif_tx_disable(bp->dev);
6094 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6095 }
6096}
6097
6098/*
6099 * Init service functions
6100 */
6101
3101c2bc 6102static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6103{
6104 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6105 int port = BP_PORT(bp);
6106
6107 /* CAM allocation
6108 * unicasts 0-31:port0 32-63:port1
6109 * multicast 64-127:port0 128-191:port1
6110 */
6111 config->hdr.length_6b = 2;
6112 config->hdr.offset = port ? 31 : 0;
6113 config->hdr.client_id = BP_CL_ID(bp);
6114 config->hdr.reserved1 = 0;
6115
6116 /* primary MAC */
6117 config->config_table[0].cam_entry.msb_mac_addr =
6118 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6119 config->config_table[0].cam_entry.middle_mac_addr =
6120 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6121 config->config_table[0].cam_entry.lsb_mac_addr =
6122 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6123 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6124 if (set)
6125 config->config_table[0].target_table_entry.flags = 0;
6126 else
6127 CAM_INVALIDATE(config->config_table[0]);
6128 config->config_table[0].target_table_entry.client_id = 0;
6129 config->config_table[0].target_table_entry.vlan_id = 0;
6130
6131 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6132 (set ? "setting" : "clearing"),
6133 config->config_table[0].cam_entry.msb_mac_addr,
6134 config->config_table[0].cam_entry.middle_mac_addr,
6135 config->config_table[0].cam_entry.lsb_mac_addr);
6136
6137 /* broadcast */
6138 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6139 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6140 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6141 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6142 if (set)
6143 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6144 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6145 else
6146 CAM_INVALIDATE(config->config_table[1]);
6147 config->config_table[1].target_table_entry.client_id = 0;
6148 config->config_table[1].target_table_entry.vlan_id = 0;
6149
6150 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6151 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6152 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6153}
6154
3101c2bc 6155static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6156{
6157 struct mac_configuration_cmd_e1h *config =
6158 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6159
3101c2bc 6160 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6161 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6162 return;
6163 }
6164
6165 /* CAM allocation for E1H
6166 * unicasts: by func number
6167 * multicast: 20+FUNC*20, 20 each
6168 */
6169 config->hdr.length_6b = 1;
6170 config->hdr.offset = BP_FUNC(bp);
6171 config->hdr.client_id = BP_CL_ID(bp);
6172 config->hdr.reserved1 = 0;
6173
6174 /* primary MAC */
6175 config->config_table[0].msb_mac_addr =
6176 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6177 config->config_table[0].middle_mac_addr =
6178 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6179 config->config_table[0].lsb_mac_addr =
6180 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6181 config->config_table[0].client_id = BP_L_ID(bp);
6182 config->config_table[0].vlan_id = 0;
6183 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6184 if (set)
6185 config->config_table[0].flags = BP_PORT(bp);
6186 else
6187 config->config_table[0].flags =
6188 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6189
6190 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6191 (set ? "setting" : "clearing"),
6192 config->config_table[0].msb_mac_addr,
6193 config->config_table[0].middle_mac_addr,
6194 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6195
6196 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6197 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6198 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6199}
6200
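/* Ramrod completions are posted through the status blocks and applied to
 * the state variable by bnx2x_sp_event(); this helper just waits for the
 * state to flip, servicing the RX ring itself when running in poll mode.
 */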
6201static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6202 int *state_p, int poll)
6203{
6204 /* can take a while if any port is running */
34f80b04 6205 int cnt = 500;
a2fbb9ea 6206
6207 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6208 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6209
6210 might_sleep();
34f80b04 6211 while (cnt--) {
6212 if (poll) {
6213 bnx2x_rx_int(bp->fp, 10);
6214 /* if the index is different from 0
6215 * the reply for some commands will
3101c2bc 6216 * be on the non-default queue
6217 */
6218 if (idx)
6219 bnx2x_rx_int(&bp->fp[idx], 10);
6220 }
a2fbb9ea 6221
3101c2bc 6222 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6223 if (*state_p == state)
6224 return 0;
6225
a2fbb9ea 6226 msleep(1);
6227 }
6228
a2fbb9ea 6229 /* timeout! */
6230 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6231 poll ? "polling" : "waiting", state, idx);
6232#ifdef BNX2X_STOP_ON_ERROR
6233 bnx2x_panic();
6234#endif
a2fbb9ea 6235
49d66772 6236 return -EBUSY;
6237}
6238
6239static int bnx2x_setup_leading(struct bnx2x *bp)
6240{
34f80b04 6241 int rc;
a2fbb9ea 6242
c14423fe 6243 /* reset IGU state */
34f80b04 6244 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6245
6246 /* SETUP ramrod */
6247 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6248
6249 /* Wait for completion */
6250 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6251
34f80b04 6252 return rc;
6253}
6254
6255static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6256{
a2fbb9ea 6257 /* reset IGU state */
34f80b04 6258 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6259
228241eb 6260 /* SETUP ramrod */
6261 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6262 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6263
6264 /* Wait for completion */
6265 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6266 &(bp->fp[index].state), 0);
6267}
6268
6269static int bnx2x_poll(struct napi_struct *napi, int budget);
6270static void bnx2x_set_rx_mode(struct net_device *dev);
6271
6272/* must be called with rtnl_lock */
6273static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6274{
228241eb 6275 u32 load_code;
34f80b04 6276 int i, rc;
6277#ifdef BNX2X_STOP_ON_ERROR
6278 if (unlikely(bp->panic))
6279 return -EPERM;
6280#endif
6281
6282 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6283
6284 /* Send LOAD_REQUEST command to the MCP.
6285 The reply gives the type of LOAD command:
6286 if it is the first port to be initialized,
6287 common blocks should be initialized as well; otherwise not
a2fbb9ea 6288 */
34f80b04 6289 if (!BP_NOMCP(bp)) {
6290 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6291 if (!load_code) {
da5a662a 6292 BNX2X_ERR("MCP response failure, aborting\n");
6293 return -EBUSY;
6294 }
34f80b04 6295 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6296 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6297
a2fbb9ea 6298 } else {
6299 int port = BP_PORT(bp);
6300
6301 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6302 load_count[0], load_count[1], load_count[2]);
6303 load_count[0]++;
da5a662a 6304 load_count[1 + port]++;
6305 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6306 load_count[0], load_count[1], load_count[2]);
6307 if (load_count[0] == 1)
6308 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6309 else if (load_count[1 + port] == 1)
6310 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6311 else
6312 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6313 }
6314
6315 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6316 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6317 bp->port.pmf = 1;
6318 else
6319 bp->port.pmf = 0;
6320 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6321
6322 /* if we can't use MSI-X we only need one fp,
6323 * so try to enable MSI-X with the requested number of fp's
6324 * and fall back to INTA with one fp
6325 */
6326 if (use_inta) {
6327 bp->num_queues = 1;
6328
6329 } else {
6330 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6331 /* user requested number */
6332 bp->num_queues = use_multi;
6333
6334 else if (use_multi)
6335 bp->num_queues = min_t(u32, num_online_cpus(),
6336 BP_MAX_QUEUES(bp));
6337 else
a2fbb9ea 6338 bp->num_queues = 1;
6339
6340 if (bnx2x_enable_msix(bp)) {
6341 /* failed to enable MSI-X */
6342 bp->num_queues = 1;
6343 if (use_multi)
6344 BNX2X_ERR("Multi requested but failed"
6345 " to enable MSI-X\n");
6346 }
6347 }
6348 DP(NETIF_MSG_IFUP,
6349 "set number of queues to %d\n", bp->num_queues);
c14423fe 6350
6351 if (bnx2x_alloc_mem(bp))
6352 return -ENOMEM;
6353
6354 for_each_queue(bp, i)
6355 bnx2x_fp(bp, i, disable_tpa) =
6356 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6357
6358 if (bp->flags & USING_MSIX_FLAG) {
6359 rc = bnx2x_req_msix_irqs(bp);
6360 if (rc) {
6361 pci_disable_msix(bp->pdev);
6362 goto load_error;
6363 }
6364 } else {
6365 bnx2x_ack_int(bp);
6366 rc = bnx2x_req_irq(bp);
6367 if (rc) {
6368 BNX2X_ERR("IRQ request failed, aborting\n");
6369 goto load_error;
6370 }
6371 }
6372
6373 for_each_queue(bp, i)
6374 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6375 bnx2x_poll, 128);
6376
a2fbb9ea 6377 /* Initialize HW */
6378 rc = bnx2x_init_hw(bp, load_code);
6379 if (rc) {
a2fbb9ea 6380 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6381 goto load_int_disable;
6382 }
6383
a2fbb9ea 6384 /* Setup NIC internals and enable interrupts */
471de716 6385 bnx2x_nic_init(bp, load_code);
6386
6387 /* Send LOAD_DONE command to MCP */
34f80b04 6388 if (!BP_NOMCP(bp)) {
6389 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6390 if (!load_code) {
da5a662a 6391 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6392 rc = -EBUSY;
d1014634 6393 goto load_rings_free;
6394 }
6395 }
6396
6397 bnx2x_stats_init(bp);
6398
6399 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6400
6401 /* Enable Rx interrupt handling before sending the ramrod
6402 as its completion arrives on the Rx FP queue */
65abd74d 6403 bnx2x_napi_enable(bp);
a2fbb9ea 6404
6405 /* Enable interrupt handling */
6406 atomic_set(&bp->intr_sem, 0);
6407
6408 rc = bnx2x_setup_leading(bp);
6409 if (rc) {
da5a662a 6410 BNX2X_ERR("Setup leading failed!\n");
d1014634 6411 goto load_netif_stop;
34f80b04 6412 }
a2fbb9ea 6413
6414 if (CHIP_IS_E1H(bp))
6415 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6416 BNX2X_ERR("!!! mf_cfg function disabled\n");
6417 bp->state = BNX2X_STATE_DISABLED;
6418 }
a2fbb9ea 6419
6420 if (bp->state == BNX2X_STATE_OPEN)
6421 for_each_nondefault_queue(bp, i) {
6422 rc = bnx2x_setup_multi(bp, i);
6423 if (rc)
d1014634 6424 goto load_netif_stop;
34f80b04 6425 }
a2fbb9ea 6426
34f80b04 6427 if (CHIP_IS_E1(bp))
3101c2bc 6428 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6429 else
3101c2bc 6430 bnx2x_set_mac_addr_e1h(bp, 1);
6431
6432 if (bp->port.pmf)
6433 bnx2x_initial_phy_init(bp);
6434
6435 /* Start fast path */
6436 switch (load_mode) {
6437 case LOAD_NORMAL:
6438 /* Tx queue should be only reenabled */
6439 netif_wake_queue(bp->dev);
6440 bnx2x_set_rx_mode(bp->dev);
6441 break;
6442
6443 case LOAD_OPEN:
a2fbb9ea 6444 netif_start_queue(bp->dev);
34f80b04 6445 bnx2x_set_rx_mode(bp->dev);
6446 if (bp->flags & USING_MSIX_FLAG)
6447 printk(KERN_INFO PFX "%s: using MSI-X\n",
6448 bp->dev->name);
34f80b04 6449 break;
a2fbb9ea 6450
34f80b04 6451 case LOAD_DIAG:
a2fbb9ea 6452 bnx2x_set_rx_mode(bp->dev);
6453 bp->state = BNX2X_STATE_DIAG;
6454 break;
6455
6456 default:
6457 break;
6458 }
6459
6460 if (!bp->port.pmf)
6461 bnx2x__link_status_update(bp);
6462
6463 /* start the timer */
6464 mod_timer(&bp->timer, jiffies + bp->current_interval);
6465
34f80b04 6466
6467 return 0;
6468
d1014634 6469load_netif_stop:
65abd74d 6470 bnx2x_napi_disable(bp);
d1014634 6471load_rings_free:
6472 /* Free SKBs, SGEs, TPA pool and driver internals */
6473 bnx2x_free_skbs(bp);
6474 for_each_queue(bp, i)
3196a88a 6475 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6476load_int_disable:
f8ef6e44 6477 bnx2x_int_disable_sync(bp, 1);
6478 /* Release IRQs */
6479 bnx2x_free_irq(bp);
228241eb 6480load_error:
6481 bnx2x_free_mem(bp);
6482
6483 /* TBD we really need to reset the chip
6484 if we want to recover from this */
34f80b04 6485 return rc;
6486}
6487
6488static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6489{
6490 int rc;
6491
c14423fe 6492 /* halt the connection */
a2fbb9ea 6493 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6494 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6495
34f80b04 6496 /* Wait for completion */
a2fbb9ea 6497 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6498 &(bp->fp[index].state), 1);
c14423fe 6499 if (rc) /* timeout */
6500 return rc;
6501
6502 /* delete cfc entry */
6503 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6504
6505 /* Wait for completion */
6506 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6507 &(bp->fp[index].state), 1);
6508 return rc;
6509}
6510
da5a662a 6511static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6512{
49d66772 6513 u16 dsb_sp_prod_idx;
c14423fe 6514 /* if the other port is handling traffic,
a2fbb9ea 6515 this can take a lot of time */
6516 int cnt = 500;
6517 int rc;
6518
6519 might_sleep();
6520
6521 /* Send HALT ramrod */
6522 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6523 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6524
6525 /* Wait for completion */
6526 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6527 &(bp->fp[0].state), 1);
6528 if (rc) /* timeout */
da5a662a 6529 return rc;
a2fbb9ea 6530
49d66772 6531 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6532
228241eb 6533 /* Send PORT_DELETE ramrod */
6534 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6535
49d66772 6536 /* Wait for the completion to arrive on the default status block;
6537 we are going to reset the chip anyway,
6538 so there is not much to do if this times out
6539 */
34f80b04 6540 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6541 if (!cnt) {
6542 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6543 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6544 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6545#ifdef BNX2X_STOP_ON_ERROR
6546 bnx2x_panic();
6547#else
6548 rc = -EBUSY;
6549#endif
6550 break;
6551 }
6552 cnt--;
da5a662a 6553 msleep(1);
6554 }
6555 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6556 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6557
6558 return rc;
6559}
6560
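/* Teardown mirrors the three load stages: bnx2x_reset_chip() below picks a
 * function-only, function+port, or full common reset based on the
 * reset_code returned by the MCP.
 */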
6561static void bnx2x_reset_func(struct bnx2x *bp)
6562{
6563 int port = BP_PORT(bp);
6564 int func = BP_FUNC(bp);
6565 int base, i;
6566
6567 /* Configure IGU */
6568 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6569 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6570
6571 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6572
6573 /* Clear ILT */
6574 base = FUNC_ILT_BASE(func);
6575 for (i = base; i < base + ILT_PER_FUNC; i++)
6576 bnx2x_ilt_wr(bp, i, 0);
6577}
6578
6579static void bnx2x_reset_port(struct bnx2x *bp)
6580{
6581 int port = BP_PORT(bp);
6582 u32 val;
6583
6584 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6585
6586 /* Do not rcv packets to BRB */
6587 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6588 /* Do not direct rcv packets that are not for MCP to the BRB */
6589 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6590 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6591
6592 /* Configure AEU */
6593 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6594
6595 msleep(100);
6596 /* Check for BRB port occupancy */
6597 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6598 if (val)
6599 DP(NETIF_MSG_IFDOWN,
33471629 6600 "BRB1 is not empty %d blocks are occupied\n", val);
6601
6602 /* TODO: Close Doorbell port? */
6603}
6604
6605static void bnx2x_reset_common(struct bnx2x *bp)
6606{
6607 /* reset_common */
6608 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6609 0xd3ffff7f);
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6611}
6612
6613static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6614{
6615 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6616 BP_FUNC(bp), reset_code);
6617
6618 switch (reset_code) {
6619 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6620 bnx2x_reset_port(bp);
6621 bnx2x_reset_func(bp);
6622 bnx2x_reset_common(bp);
6623 break;
6624
6625 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6626 bnx2x_reset_port(bp);
6627 bnx2x_reset_func(bp);
6628 break;
6629
6630 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6631 bnx2x_reset_func(bp);
6632 break;
49d66772 6633
6634 default:
6635 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6636 break;
6637 }
6638}
6639
33471629 6640/* must be called with rtnl_lock */
34f80b04 6641static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6642{
da5a662a 6643 int port = BP_PORT(bp);
a2fbb9ea 6644 u32 reset_code = 0;
da5a662a 6645 int i, cnt, rc;
6646
6647 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6648
6649 bp->rx_mode = BNX2X_RX_MODE_NONE;
6650 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6651
f8ef6e44 6652 bnx2x_netif_stop(bp, 1);
6653 if (!netif_running(bp->dev))
6654 bnx2x_napi_disable(bp);
6655 del_timer_sync(&bp->timer);
6656 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6657 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6658 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6659
da5a662a 6660 /* Wait until tx fast path tasks complete */
6661 for_each_queue(bp, i) {
6662 struct bnx2x_fastpath *fp = &bp->fp[i];
6663
6664 cnt = 1000;
6665 smp_rmb();
6666 while (BNX2X_HAS_TX_WORK(fp)) {
6667
65abd74d 6668 bnx2x_tx_int(fp, 1000);
6669 if (!cnt) {
6670 BNX2X_ERR("timeout waiting for queue[%d]\n",
6671 i);
6672#ifdef BNX2X_STOP_ON_ERROR
6673 bnx2x_panic();
6674 return -EBUSY;
6675#else
6676 break;
6677#endif
6678 }
6679 cnt--;
da5a662a 6680 msleep(1);
6681 smp_rmb();
6682 }
228241eb 6683 }
6684 /* Give HW time to discard old tx messages */
6685 msleep(1);
a2fbb9ea 6686
6687 /* Release IRQs */
6688 bnx2x_free_irq(bp);
6689
6690 if (CHIP_IS_E1(bp)) {
6691 struct mac_configuration_cmd *config =
6692 bnx2x_sp(bp, mcast_config);
6693
6694 bnx2x_set_mac_addr_e1(bp, 0);
6695
6696 for (i = 0; i < config->hdr.length_6b; i++)
6697 CAM_INVALIDATE(config->config_table[i]);
6698
6699 config->hdr.length_6b = i;
6700 if (CHIP_REV_IS_SLOW(bp))
6701 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6702 else
6703 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6704 config->hdr.client_id = BP_CL_ID(bp);
6705 config->hdr.reserved1 = 0;
6706
6707 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6708 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6709 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6710
6711 } else { /* E1H */
6712 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
6714 bnx2x_set_mac_addr_e1h(bp, 0);
6715
6716 for (i = 0; i < MC_HASH_SIZE; i++)
6717 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6718 }
6719
6720 if (unload_mode == UNLOAD_NORMAL)
6721 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6722
6723 else if (bp->flags & NO_WOL_FLAG) {
6724 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6725 if (CHIP_IS_E1H(bp))
6726 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6727
6728 } else if (bp->wol) {
6729 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6730 u8 *mac_addr = bp->dev->dev_addr;
6731 u32 val;
6732 /* The mac address is written to entries 1-4 to
6733 preserve entry 0 which is used by the PMF */
6734 u8 entry = (BP_E1HVN(bp) + 1)*8;
6735
6736 val = (mac_addr[0] << 8) | mac_addr[1];
6737 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6738
6739 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6740 (mac_addr[4] << 8) | mac_addr[5];
6741 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6742
6743 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6744
6745 } else
6746 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6747
6748 /* Close multi and leading connections
6749 Completions for ramrods are collected in a synchronous way */
6750 for_each_nondefault_queue(bp, i)
6751 if (bnx2x_stop_multi(bp, i))
228241eb 6752 goto unload_error;
a2fbb9ea 6753
6754 rc = bnx2x_stop_leading(bp);
6755 if (rc) {
34f80b04 6756 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6757#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6758 return -EBUSY;
6759#else
6760 goto unload_error;
34f80b04 6761#endif
6762 }
6763
6764unload_error:
34f80b04 6765 if (!BP_NOMCP(bp))
228241eb 6766 reset_code = bnx2x_fw_command(bp, reset_code);
6767 else {
6768 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6769 load_count[0], load_count[1], load_count[2]);
6770 load_count[0]--;
da5a662a 6771 load_count[1 + port]--;
6772 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6773 load_count[0], load_count[1], load_count[2]);
6774 if (load_count[0] == 0)
6775 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6776 else if (load_count[1 + port] == 0)
6777 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6778 else
6779 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6780 }
a2fbb9ea 6781
6782 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6783 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6784 bnx2x__link_reset(bp);
a2fbb9ea
ET
6785
6786 /* Reset the chip */
228241eb 6787 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6788
6789 /* Report UNLOAD_DONE to MCP */
34f80b04 6790 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6791 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6792
7a9b2557 6793 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6794 bnx2x_free_skbs(bp);
7a9b2557 6795 for_each_queue(bp, i)
3196a88a 6796 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6797 bnx2x_free_mem(bp);
6798
6799 bp->state = BNX2X_STATE_CLOSED;
228241eb 6800
a2fbb9ea
ET
6801 netif_carrier_off(bp->dev);
6802
6803 return 0;
6804}
6805
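/* Full recovery path, scheduled as work (e.g. from the tx timeout
 * handler in this driver): unload and reload the NIC under rtnl_lock.
 * Runs in process context, so the sleeping load/unload paths are safe.
 */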
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

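/* A pre-boot UNDI driver may leave the chip initialized but not
 * cleanly shut down.  Its footprint is a normal-doorbell CID offset
 * of 0x7; when detected, unload UNDI through the MCP on both ports,
 * reset the chip and restore the NIG port-swap strap values.
 */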
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7)
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			    (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
				    (SHMEM_RD(bp,
					      func_mb[bp->func].drv_mb_header) &
				     DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			    (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK);
		}
	}
}

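/* Read the port-independent hardware configuration: chip id, flash
 * size, shared memory base, bootcode version, WoL capability and the
 * board part number.  A shmem base outside 0xA0000-0xC0000 means the
 * MCP is not active; NO_MCP_FLAG is set and the rest is skipped.
 */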
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

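/* Build the ethtool SUPPORTED_* mask for this port from the NVRAM
 * switch configuration and the external PHY type, then trim it by the
 * NVRAM speed capability mask.
 */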
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

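/* Turn the NVRAM link_config word into the requested line speed,
 * duplex, flow control and advertised mask.  Every forced speed is
 * validated against the supported mask built above; mismatches are
 * reported as NVRAM config errors.
 */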
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

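/* Collect the per-port configuration (serdes/lane/external PHY setup,
 * speed capabilities, link config and the port MAC address) from
 * shared memory and seed link_params with it.
 */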
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
	     KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
		       " link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

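/* Top-level HW info gathering: common (chip-wide) info first, then
 * the E1H multi-function parameters (outer VLAN tag and per-function
 * MAC address) and, when an MCP is present, the per-port info.
 */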
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

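/* One-time driver state init: locks, work items, TPA and ring size
 * defaults, coalescing parameters and the periodic timer.  Interrupt
 * handling stays disabled (intr_sem) until the HW is initialized.
 */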
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

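/* ethtool get_settings: report the supported/advertised masks and
 * either the actual link state (carrier on) or the requested
 * parameters, clipped by the per-function max rate in E1H MF mode.
 */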
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

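/* NVRAM access is arbitrated between the ports through per-port
 * request/grant bits in MCPR_NVM_SW_ARB: set the request bit, then
 * poll for the matching grant.  Timeouts are stretched 100x on
 * emulation/FPGA where everything runs slower.
 */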
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

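/* A single NVRAM dword read: program the address, issue the command
 * with DOIT set and poll for DONE.  The result is converted to
 * big-endian because ethtool treats the flash as a byte array.
 */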
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

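/* Multi-dword NVRAM write.  The FIRST/LAST command flags must bracket
 * each NVRAM page, so they are recomputed whenever the offset crosses
 * a NVRAM_PAGE_SIZE boundary; single-byte (ethtool) writes go through
 * the read-modify-write helper above instead.
 */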
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

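/* ethtool set_eeprom doubles as the PHY firmware upgrade hook: magic
 * 0x00504859 ("PHY") routes the buffer to the external PHY flash
 * download (PMF only), anything else is written to the NVRAM.
 */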
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
			    FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
			    FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

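/* TPA (LRO) depends on Rx checksum offload; these hooks keep
 * NETIF_F_LRO, TPA_ENABLE_FLAG and rx_csum consistent and reload the
 * NIC when the effective flags actually change.
 */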
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

8476
f3c87cdd 8477static const struct {
a2fbb9ea
ET
8478 char string[ETH_GSTRING_LEN];
8479} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8480 { "register_test (offline)" },
8481 { "memory_test (offline)" },
8482 { "loopback_test (offline)" },
8483 { "nvram_test (online)" },
8484 { "interrupt_test (online)" },
8485 { "link_test (online)" },
8486 { "idle check (online)" },
8487 { "MC errors (online)" }
a2fbb9ea
ET
8488};
8489
8490static int bnx2x_self_test_count(struct net_device *dev)
8491{
8492 return BNX2X_NUM_TESTS;
8493}
8494
f3c87cdd
YG
8495static int bnx2x_test_registers(struct bnx2x *bp)
8496{
8497 int idx, i, rc = -ENODEV;
8498 u32 wr_val = 0;
9dabc424 8499 int port = BP_PORT(bp);
f3c87cdd
YG
8500 static const struct {
8501 u32 offset0;
8502 u32 offset1;
8503 u32 mask;
8504 } reg_tbl[] = {
8505/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8506 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8507 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8508 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8509 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8510 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8511 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8512 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8513 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8514 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8515/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8516 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8517 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8518 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8519 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8520 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8521 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8522 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8523 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8524 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8525/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8526 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8527 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8528 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8529 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8530 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8531 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8532 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8533 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8534 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8535/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8536 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8537 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8538 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8539 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8540 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8541 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8542 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8543
8544 { 0xffffffff, 0, 0x00000000 }
8545 };
8546
8547 if (!netif_running(bp->dev))
8548 return rc;
8549
8550 /* Repeat the test twice:
8551 First by writing 0x00000000, second by writing 0xffffffff */
8552 for (idx = 0; idx < 2; idx++) {
8553
8554 switch (idx) {
8555 case 0:
8556 wr_val = 0;
8557 break;
8558 case 1:
8559 wr_val = 0xffffffff;
8560 break;
8561 }
8562
8563 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8564 u32 offset, mask, save_val, val;
f3c87cdd
YG
8565
8566 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8567 mask = reg_tbl[i].mask;
8568
8569 save_val = REG_RD(bp, offset);
8570
8571 REG_WR(bp, offset, wr_val);
8572 val = REG_RD(bp, offset);
8573
8574 /* Restore the original register's value */
8575 REG_WR(bp, offset, save_val);
8576
8577 /* verify that value is as expected value */
8578 if ((val & mask) != (wr_val & mask))
8579 goto test_reg_exit;
8580 }
8581 }
8582
8583 rc = 0;
8584
8585test_reg_exit:
8586 return rc;
8587}
8588
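/* Read back every word of several internal memories to flush out
 * access errors, then check each block's parity status register
 * against the bits expected to be set on E1 vs E1H.
 */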
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

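/* Run one frame through MAC or PHY (XGXS) loopback on queue 0: build a
 * self-addressed 1514-byte frame with a known byte pattern, post it as
 * a single TX BD, and verify that exactly one packet completes on both
 * the TX and RX sides with the payload intact.
 */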
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}

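/* Each NVRAM region carries an appended CRC32, so running ether_crc_le()
 * over the region's data plus its checksum must yield the fixed CRC-32
 * residual value below when the region is intact.
 */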
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

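/* Verify the interrupt/slowpath plumbing by posting a harmless empty
 * SET_MAC ramrod and waiting up to ~100ms for its completion to clear
 * set_mac_pending.
 */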
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
	u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

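/* Adjust the stack's partial checksum when the hardware checksum start
 * offset does not coincide with the transport header: subtract the
 * checksum of the extra bytes before it (fix > 0) or add the missing
 * ones (fix < 0), and return the result byte-swapped for the parsing BD.
 */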
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

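/* Classify an outgoing skb into a bitmask of XMIT_* flags: plain,
 * IPv4/IPv6 checksum offload (plus TCP), and GSO v4/v6; the TX path
 * uses this mask to decide which BD flags and parsing-BD fields to set.
 */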
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

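/* Program RX filtering from the net_device flags: promiscuous and
 * all-multi map directly, E1 loads up to BNX2X_MAX_MULTICAST exact
 * CAM entries via a ramrod, and E1H hashes each multicast MAC with
 * crc32c and sets one bit in the 8x32-bit MC_HASH registers.
 */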
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

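/* One-time PCI/netdev bring-up called from bnx2x_init_one(): enable the
 * device, claim both memory BARs, pick a 64- or 32-bit DMA mask, map the
 * register and doorbell windows, and wire up the net_device callbacks
 * and feature flags.
 */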
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

10163
25047950
ET
10164static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10165{
10166 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10167
10168 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10169 return val;
10170}
10171
10172/* return value of 1=2.5GHz 2=5GHz */
10173static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10174{
10175 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10176
10177 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10178 return val;
10179}
10180
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc) {
		unregister_netdev(dev);
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

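/* Minimal teardown used on a PCI (EEH) error: the hardware may be gone,
 * so skip the normal unload ramrods and just stop the netif, free IRQs,
 * invalidate the E1 CAM entries and release SKBs and driver memory.
 */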
f8ef6e44
YG
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

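/* After a slot reset, re-read the shared memory block to find out
 * whether the MCP (management firmware) is still alive; if it is not,
 * mark the device with NO_MCP_FLAG and carry on without it.
 */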
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

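/* PCI error recovery (EEH/AER) callbacks, driven by the PCI core in
 * this order: error_detected() quiesces the device and returns
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset, slot_reset()
 * re-enables and restores the device after the bus reset, and
 * resume() reloads the NIC once the core declares recovery complete.
 */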
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

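/* Module entry points: all per-device setup and teardown happens in
 * bnx2x_init_one()/bnx2x_remove_one() via the PCI core.
 */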
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);