[PATCH] S2io: Support for runtime MTU change
/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * rx_ring_num : This can be used to program the number of receive rings used
 *		 in the driver.
 * rx_ring_len : This defines the number of descriptors each ring can have.
 *		 This is also an array of size 8.
 * tx_fifo_num : This defines the number of Tx FIFOs that are used in the
 *		 driver.
 * tx_fifo_len : This too is an array of 8. Each element defines the number of
 *		 Tx descriptors that can be associated with each corresponding
 *		 FIFO.
 ************************************************************************/
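/*
 * Illustrative usage (a sketch, not taken from this source): assuming the
 * parameters above are exported as module parameters under the same names,
 * a load with two Tx FIFOs of 512 descriptors each and two receive rings
 * might look like:
 *
 *	insmod s2io.ko tx_fifo_num=2 tx_fifo_len=512,512 rx_ring_num=2
 *
 * Any parameter that is left out keeps the driver's default value.
 */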

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/version.h>
#include <linux/workqueue.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = "Version 1.7.7";

static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((subid >= 0x600B) && (subid <= 0x600D)) || \
	 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
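/*
 * Worked example: a subsystem id of 0x600C or 0x640B evaluates to 1
 * (faulty link indicators), while 0x6010 lies outside both ranges and
 * evaluates to 0.
 */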

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
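/*
 * rx_buffer_level() (below) reports how starved a receive ring is: it
 * returns LOW once rxb_size has dropped more than 16 below the ring's
 * pkt_cnt, escalates to PANIC when rxb_size is no more than
 * MAX_RXDS_PER_BLOCK, and returns 0 while the fill level is acceptable.
 */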
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
{
	int level = 0;
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
		level = LOW;
		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
			level = PANIC;
		}
	}

	return level;
}
110
111/* Ethtool related variables and Macros. */
112static char s2io_gstrings[][ETH_GSTRING_LEN] = {
113 "Register test\t(offline)",
114 "Eeprom test\t(offline)",
115 "Link test\t(online)",
116 "RLDRAM test\t(offline)",
117 "BIST Test\t(offline)"
118};
119
120static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
121 {"tmac_frms"},
122 {"tmac_data_octets"},
123 {"tmac_drop_frms"},
124 {"tmac_mcst_frms"},
125 {"tmac_bcst_frms"},
126 {"tmac_pause_ctrl_frms"},
127 {"tmac_any_err_frms"},
128 {"tmac_vld_ip_octets"},
129 {"tmac_vld_ip"},
130 {"tmac_drop_ip"},
131 {"tmac_icmp"},
132 {"tmac_rst_tcp"},
133 {"tmac_tcp"},
134 {"tmac_udp"},
135 {"rmac_vld_frms"},
136 {"rmac_data_octets"},
137 {"rmac_fcs_err_frms"},
138 {"rmac_drop_frms"},
139 {"rmac_vld_mcst_frms"},
140 {"rmac_vld_bcst_frms"},
141 {"rmac_in_rng_len_err_frms"},
142 {"rmac_long_frms"},
143 {"rmac_pause_ctrl_frms"},
144 {"rmac_discarded_frms"},
145 {"rmac_usized_frms"},
146 {"rmac_osized_frms"},
147 {"rmac_frag_frms"},
148 {"rmac_jabber_frms"},
149 {"rmac_ip"},
150 {"rmac_ip_octets"},
151 {"rmac_hdr_err_ip"},
152 {"rmac_drop_ip"},
153 {"rmac_icmp"},
154 {"rmac_tcp"},
155 {"rmac_udp"},
156 {"rmac_err_drp_udp"},
157 {"rmac_pause_cnt"},
158 {"rmac_accepted_ip"},
159 {"rmac_err_tcp"},
160 {"\n DRIVER STATISTICS"},
161 {"single_bit_ecc_errs"},
162 {"double_bit_ecc_errs"},
163};
164
165#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
166#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
167
168#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
169#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
170
20346722 171/*
1da177e4
LT
172 * Constants to be programmed into the Xena's registers, to configure
173 * the XAUI.
174 */
175
176#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
177#define END_SIGN 0x0
178
179static u64 default_mdio_cfg[] = {
180 /* Reset PMA PLL */
181 0xC001010000000000ULL, 0xC0010100000000E0ULL,
182 0xC0010100008000E4ULL,
183 /* Remove Reset from PMA PLL */
184 0xC001010000000000ULL, 0xC0010100000000E0ULL,
185 0xC0010100000000E4ULL,
186 END_SIGN
187};
188
189static u64 default_dtx_cfg[] = {
190 0x8000051500000000ULL, 0x80000515000000E0ULL,
191 0x80000515D93500E4ULL, 0x8001051500000000ULL,
192 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
193 0x8002051500000000ULL, 0x80020515000000E0ULL,
194 0x80020515F21000E4ULL,
195 /* Set PADLOOPBACKN */
196 0x8002051500000000ULL, 0x80020515000000E0ULL,
197 0x80020515B20000E4ULL, 0x8003051500000000ULL,
198 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
199 0x8004051500000000ULL, 0x80040515000000E0ULL,
200 0x80040515B20000E4ULL, 0x8005051500000000ULL,
201 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
202 SWITCH_SIGN,
203 /* Remove PADLOOPBACKN */
204 0x8002051500000000ULL, 0x80020515000000E0ULL,
205 0x80020515F20000E4ULL, 0x8003051500000000ULL,
206 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
207 0x8004051500000000ULL, 0x80040515000000E0ULL,
208 0x80040515F20000E4ULL, 0x8005051500000000ULL,
209 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
210 END_SIGN
211};
212
20346722 213/*
1da177e4
LT
214 * Constants for Fixing the MacAddress problem seen mostly on
215 * Alpha machines.
216 */
217static u64 fix_mac[] = {
218 0x0060000000000000ULL, 0x0060600000000000ULL,
219 0x0040600000000000ULL, 0x0000600000000000ULL,
220 0x0020600000000000ULL, 0x0060600000000000ULL,
221 0x0020600000000000ULL, 0x0060600000000000ULL,
222 0x0020600000000000ULL, 0x0060600000000000ULL,
223 0x0020600000000000ULL, 0x0060600000000000ULL,
224 0x0020600000000000ULL, 0x0060600000000000ULL,
225 0x0020600000000000ULL, 0x0060600000000000ULL,
226 0x0020600000000000ULL, 0x0060600000000000ULL,
227 0x0020600000000000ULL, 0x0060600000000000ULL,
228 0x0020600000000000ULL, 0x0060600000000000ULL,
229 0x0020600000000000ULL, 0x0060600000000000ULL,
230 0x0020600000000000ULL, 0x0000600000000000ULL,
231 0x0040600000000000ULL, 0x0060600000000000ULL,
232 END_SIGN
233};
234
235/* Module Loadable parameters. */
236static unsigned int tx_fifo_num = 1;
237static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
238 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
239static unsigned int rx_ring_num = 1;
240static unsigned int rx_ring_sz[MAX_RX_RINGS] =
241 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
20346722
K
242static unsigned int rts_frm_len[MAX_RX_RINGS] =
243 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
5e25b9dd 244static unsigned int use_continuous_tx_intrs = 1;
1da177e4
LT
245static unsigned int rmac_pause_time = 65535;
246static unsigned int mc_pause_threshold_q0q3 = 187;
247static unsigned int mc_pause_threshold_q4q7 = 187;
248static unsigned int shared_splits;
249static unsigned int tmac_util_period = 5;
250static unsigned int rmac_util_period = 5;
251#ifndef CONFIG_S2IO_NAPI
252static unsigned int indicate_max_pkts;
253#endif
254
20346722 255/*
1da177e4 256 * S2IO device table.
20346722 257 * This table lists all the devices that this driver supports.
1da177e4
LT
258 */
259static struct pci_device_id s2io_tbl[] __devinitdata = {
260 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
261 PCI_ANY_ID, PCI_ANY_ID},
262 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
263 PCI_ANY_ID, PCI_ANY_ID},
264 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722
K
265 PCI_ANY_ID, PCI_ANY_ID},
266 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
267 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
268 {0,}
269};
270
271MODULE_DEVICE_TABLE(pci, s2io_tbl);
272
273static struct pci_driver s2io_driver = {
274 .name = "S2IO",
275 .id_table = s2io_tbl,
276 .probe = s2io_init_nic,
277 .remove = __devexit_p(s2io_rem_nic),
278};
279
280/* A simplifier macro used both by init and free shared_mem Fns(). */
281#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
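/*
 * TXD_MEM_PAGE_CNT() is a plain ceiling division: for example, a FIFO of
 * 100 descriptor lists with 32 lists fitting per page needs
 * (100 + 32 - 1) / 32 = 4 pages.
 */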
282
283/**
284 * init_shared_mem - Allocation and Initialization of Memory
285 * @nic: Device private variable.
20346722
K
286 * Description: The function allocates all the memory areas shared
287 * between the NIC and the driver. This includes Tx descriptors,
1da177e4
LT
288 * Rx descriptors and the statistics block.
289 */
290
291static int init_shared_mem(struct s2io_nic *nic)
292{
293 u32 size;
294 void *tmp_v_addr, *tmp_v_addr_next;
295 dma_addr_t tmp_p_addr, tmp_p_addr_next;
296 RxD_block_t *pre_rxd_blk = NULL;
20346722 297 int i, j, blk_cnt, rx_sz, tx_sz;
1da177e4
LT
298 int lst_size, lst_per_page;
299 struct net_device *dev = nic->dev;
300#ifdef CONFIG_2BUFF_MODE
20346722 301 u64 tmp;
1da177e4
LT
302 buffAdd_t *ba;
303#endif
304
305 mac_info_t *mac_control;
306 struct config_param *config;
307
308 mac_control = &nic->mac_control;
309 config = &nic->config;
310
311
	/* Allocation and initialization of TXDLs in FIFOs */
313 size = 0;
314 for (i = 0; i < config->tx_fifo_num; i++) {
315 size += config->tx_cfg[i].fifo_len;
316 }
317 if (size > MAX_AVAILABLE_TXDS) {
318 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
319 dev->name);
320 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
321 DBG_PRINT(ERR_DBG, "that can be used\n");
322 return FAILURE;
323 }
324
325 lst_size = (sizeof(TxD_t) * config->max_txds);
20346722 326 tx_sz = lst_size * size;
1da177e4
LT
327 lst_per_page = PAGE_SIZE / lst_size;
328
329 for (i = 0; i < config->tx_fifo_num; i++) {
330 int fifo_len = config->tx_cfg[i].fifo_len;
331 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
20346722
K
332 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
333 GFP_KERNEL);
334 if (!mac_control->fifos[i].list_info) {
1da177e4
LT
335 DBG_PRINT(ERR_DBG,
336 "Malloc failed for list_info\n");
337 return -ENOMEM;
338 }
20346722 339 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
1da177e4
LT
340 }
341 for (i = 0; i < config->tx_fifo_num; i++) {
342 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
343 lst_per_page);
20346722
K
344 mac_control->fifos[i].tx_curr_put_info.offset = 0;
345 mac_control->fifos[i].tx_curr_put_info.fifo_len =
1da177e4 346 config->tx_cfg[i].fifo_len - 1;
20346722
K
347 mac_control->fifos[i].tx_curr_get_info.offset = 0;
348 mac_control->fifos[i].tx_curr_get_info.fifo_len =
1da177e4 349 config->tx_cfg[i].fifo_len - 1;
20346722
K
350 mac_control->fifos[i].fifo_no = i;
351 mac_control->fifos[i].nic = nic;
352 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
353
1da177e4
LT
354 for (j = 0; j < page_num; j++) {
355 int k = 0;
356 dma_addr_t tmp_p;
357 void *tmp_v;
358 tmp_v = pci_alloc_consistent(nic->pdev,
359 PAGE_SIZE, &tmp_p);
360 if (!tmp_v) {
361 DBG_PRINT(ERR_DBG,
362 "pci_alloc_consistent ");
363 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
364 return -ENOMEM;
365 }
366 while (k < lst_per_page) {
367 int l = (j * lst_per_page) + k;
368 if (l == config->tx_cfg[i].fifo_len)
20346722
K
369 break;
370 mac_control->fifos[i].list_info[l].list_virt_addr =
1da177e4 371 tmp_v + (k * lst_size);
20346722 372 mac_control->fifos[i].list_info[l].list_phy_addr =
1da177e4
LT
373 tmp_p + (k * lst_size);
374 k++;
375 }
376 }
377 }
1da177e4
LT
378
379 /* Allocation and initialization of RXDs in Rings */
380 size = 0;
381 for (i = 0; i < config->rx_ring_num; i++) {
382 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
383 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
384 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
385 i);
386 DBG_PRINT(ERR_DBG, "RxDs per Block");
387 return FAILURE;
388 }
389 size += config->rx_cfg[i].num_rxd;
20346722 390 mac_control->rings[i].block_count =
1da177e4 391 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722
K
392 mac_control->rings[i].pkt_cnt =
393 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
1da177e4 394 }
20346722
K
395 size = (size * (sizeof(RxD_t)));
396 rx_sz = size;
1da177e4
LT
397
398 for (i = 0; i < config->rx_ring_num; i++) {
20346722
K
399 mac_control->rings[i].rx_curr_get_info.block_index = 0;
400 mac_control->rings[i].rx_curr_get_info.offset = 0;
401 mac_control->rings[i].rx_curr_get_info.ring_len =
1da177e4 402 config->rx_cfg[i].num_rxd - 1;
20346722
K
403 mac_control->rings[i].rx_curr_put_info.block_index = 0;
404 mac_control->rings[i].rx_curr_put_info.offset = 0;
405 mac_control->rings[i].rx_curr_put_info.ring_len =
1da177e4 406 config->rx_cfg[i].num_rxd - 1;
20346722
K
407 mac_control->rings[i].nic = nic;
408 mac_control->rings[i].ring_no = i;
409
1da177e4
LT
410 blk_cnt =
411 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
412 /* Allocating all the Rx blocks */
413 for (j = 0; j < blk_cnt; j++) {
414#ifndef CONFIG_2BUFF_MODE
415 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
416#else
417 size = SIZE_OF_BLOCK;
418#endif
419 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
420 &tmp_p_addr);
421 if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
20346722 428 mac_control->rings[i].rx_blocks[j].block_virt_addr =
1da177e4
LT
429 tmp_v_addr;
430 return -ENOMEM;
431 }
432 memset(tmp_v_addr, 0, size);
20346722
K
433 mac_control->rings[i].rx_blocks[j].block_virt_addr =
434 tmp_v_addr;
435 mac_control->rings[i].rx_blocks[j].block_dma_addr =
436 tmp_p_addr;
1da177e4
LT
437 }
438 /* Interlinking all Rx Blocks */
439 for (j = 0; j < blk_cnt; j++) {
20346722
K
440 tmp_v_addr =
441 mac_control->rings[i].rx_blocks[j].block_virt_addr;
1da177e4 442 tmp_v_addr_next =
20346722 443 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4 444 blk_cnt].block_virt_addr;
20346722
K
445 tmp_p_addr =
446 mac_control->rings[i].rx_blocks[j].block_dma_addr;
1da177e4 447 tmp_p_addr_next =
20346722 448 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4
LT
449 blk_cnt].block_dma_addr;
450
451 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
20346722 452 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
1da177e4
LT
453 * marker.
454 */
455#ifndef CONFIG_2BUFF_MODE
456 pre_rxd_blk->reserved_2_pNext_RxD_block =
457 (unsigned long) tmp_v_addr_next;
458#endif
459 pre_rxd_blk->pNext_RxD_Blk_physical =
460 (u64) tmp_p_addr_next;
461 }
462 }
463
464#ifdef CONFIG_2BUFF_MODE
20346722 465 /*
1da177e4
LT
466 * Allocation of Storages for buffer addresses in 2BUFF mode
467 * and the buffers as well.
468 */
469 for (i = 0; i < config->rx_ring_num; i++) {
470 blk_cnt =
471 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 472 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
1da177e4 473 GFP_KERNEL);
20346722 474 if (!mac_control->rings[i].ba)
1da177e4
LT
475 return -ENOMEM;
476 for (j = 0; j < blk_cnt; j++) {
477 int k = 0;
20346722 478 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
1da177e4
LT
479 (MAX_RXDS_PER_BLOCK + 1)),
480 GFP_KERNEL);
20346722 481 if (!mac_control->rings[i].ba[j])
1da177e4
LT
482 return -ENOMEM;
483 while (k != MAX_RXDS_PER_BLOCK) {
20346722 484 ba = &mac_control->rings[i].ba[j][k];
1da177e4 485
20346722 486 ba->ba_0_org = (void *) kmalloc
1da177e4
LT
487 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
488 if (!ba->ba_0_org)
489 return -ENOMEM;
20346722 490 tmp = (u64) ba->ba_0_org;
1da177e4 491 tmp += ALIGN_SIZE;
20346722 492 tmp &= ~((u64) ALIGN_SIZE);
1da177e4
LT
493 ba->ba_0 = (void *) tmp;
494
20346722 495 ba->ba_1_org = (void *) kmalloc
1da177e4
LT
496 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
497 if (!ba->ba_1_org)
498 return -ENOMEM;
20346722 499 tmp = (u64) ba->ba_1_org;
1da177e4 500 tmp += ALIGN_SIZE;
20346722 501 tmp &= ~((u64) ALIGN_SIZE);
1da177e4
LT
502 ba->ba_1 = (void *) tmp;
503 k++;
504 }
505 }
506 }
507#endif
508
509 /* Allocation and initialization of Statistics block */
510 size = sizeof(StatInfo_t);
511 mac_control->stats_mem = pci_alloc_consistent
512 (nic->pdev, size, &mac_control->stats_mem_phy);
513
514 if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
520 return -ENOMEM;
521 }
522 mac_control->stats_mem_sz = size;
523
524 tmp_v_addr = mac_control->stats_mem;
525 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
526 memset(tmp_v_addr, 0, size);
1da177e4
LT
527 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
528 (unsigned long long) tmp_p_addr);
529
530 return SUCCESS;
531}
532
20346722
K
533/**
534 * free_shared_mem - Free the allocated Memory
1da177e4
LT
535 * @nic: Device private variable.
536 * Description: This function is to free all memory locations allocated by
537 * the init_shared_mem() function and return it to the kernel.
538 */
539
540static void free_shared_mem(struct s2io_nic *nic)
541{
542 int i, j, blk_cnt, size;
543 void *tmp_v_addr;
544 dma_addr_t tmp_p_addr;
545 mac_info_t *mac_control;
546 struct config_param *config;
547 int lst_size, lst_per_page;
548
549
550 if (!nic)
551 return;
552
553 mac_control = &nic->mac_control;
554 config = &nic->config;
555
556 lst_size = (sizeof(TxD_t) * config->max_txds);
557 lst_per_page = PAGE_SIZE / lst_size;
558
559 for (i = 0; i < config->tx_fifo_num; i++) {
560 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
561 lst_per_page);
562 for (j = 0; j < page_num; j++) {
563 int mem_blks = (j * lst_per_page);
20346722
K
564 if (!mac_control->fifos[i].list_info[mem_blks].
565 list_virt_addr)
1da177e4
LT
566 break;
567 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722
K
568 mac_control->fifos[i].
569 list_info[mem_blks].
1da177e4 570 list_virt_addr,
20346722
K
571 mac_control->fifos[i].
572 list_info[mem_blks].
1da177e4
LT
573 list_phy_addr);
574 }
20346722 575 kfree(mac_control->fifos[i].list_info);
1da177e4
LT
576 }
577
578#ifndef CONFIG_2BUFF_MODE
579 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
580#else
581 size = SIZE_OF_BLOCK;
582#endif
583 for (i = 0; i < config->rx_ring_num; i++) {
20346722 584 blk_cnt = mac_control->rings[i].block_count;
1da177e4 585 for (j = 0; j < blk_cnt; j++) {
20346722
K
586 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
587 block_virt_addr;
588 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
589 block_dma_addr;
1da177e4
LT
590 if (tmp_v_addr == NULL)
591 break;
592 pci_free_consistent(nic->pdev, size,
593 tmp_v_addr, tmp_p_addr);
594 }
595 }
596
597#ifdef CONFIG_2BUFF_MODE
598 /* Freeing buffer storage addresses in 2BUFF mode. */
599 for (i = 0; i < config->rx_ring_num; i++) {
600 blk_cnt =
601 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
1da177e4
LT
602 for (j = 0; j < blk_cnt; j++) {
603 int k = 0;
20346722
K
604 if (!mac_control->rings[i].ba[j])
605 continue;
1da177e4 606 while (k != MAX_RXDS_PER_BLOCK) {
20346722 607 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
1da177e4
LT
608 kfree(ba->ba_0_org);
609 kfree(ba->ba_1_org);
610 k++;
611 }
20346722 612 kfree(mac_control->rings[i].ba[j]);
1da177e4 613 }
20346722
K
614 if (mac_control->rings[i].ba)
615 kfree(mac_control->rings[i].ba);
1da177e4 616 }
1da177e4
LT
617#endif
618
619 if (mac_control->stats_mem) {
620 pci_free_consistent(nic->pdev,
621 mac_control->stats_mem_sz,
622 mac_control->stats_mem,
623 mac_control->stats_mem_phy);
624 }
625}
626
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
635
636static int init_nic(struct s2io_nic *nic)
637{
638 XENA_dev_config_t __iomem *bar0 = nic->bar0;
639 struct net_device *dev = nic->dev;
640 register u64 val64 = 0;
641 void __iomem *add;
642 u32 time;
643 int i, j;
644 mac_info_t *mac_control;
645 struct config_param *config;
646 int mdio_cnt = 0, dtx_cnt = 0;
647 unsigned long long mem_share;
20346722 648 int mem_size;
1da177e4
LT
649
650 mac_control = &nic->mac_control;
651 config = &nic->config;
652
	/* Set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}
658
659 /* Remove XGXS from reset state */
660 val64 = 0;
661 writeq(val64, &bar0->sw_reset);
1da177e4 662 msleep(500);
20346722 663 val64 = readq(&bar0->sw_reset);
1da177e4
LT
664
665 /* Enable Receiving broadcasts */
666 add = &bar0->mac_cfg;
667 val64 = readq(&bar0->mac_cfg);
668 val64 |= MAC_RMAC_BCAST_ENABLE;
669 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
670 writel((u32) val64, add);
671 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
672 writel((u32) (val64 >> 32), (add + 4));
673
674 /* Read registers in all blocks */
675 val64 = readq(&bar0->mac_int_mask);
676 val64 = readq(&bar0->mc_int_mask);
677 val64 = readq(&bar0->xgxs_int_mask);
678
679 /* Set MTU */
680 val64 = dev->mtu;
681 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
682
	/*
	 * Configuring the XAUI Interface of Xena.
	 * ***************************************
	 * To configure the Xena's XAUI, one has to write a series
	 * of 64 bit values into two registers in a particular
	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
	 * which is placed in the array of configuration values
	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
	 * to switch writing from one register to another. We continue
	 * writing these values until we encounter the 'END_SIGN' macro.
	 * For example, after making a series of 21 writes into the
	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
	 * start writing into mdio_control until we encounter END_SIGN.
	 */
697 while (1) {
698 dtx_cfg:
699 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
700 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
701 dtx_cnt++;
702 goto mdio_cfg;
703 }
704 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
705 &bar0->dtx_control, UF);
706 val64 = readq(&bar0->dtx_control);
707 dtx_cnt++;
708 }
709 mdio_cfg:
710 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
711 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
712 mdio_cnt++;
713 goto dtx_cfg;
714 }
715 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
716 &bar0->mdio_control, UF);
717 val64 = readq(&bar0->mdio_control);
718 mdio_cnt++;
719 }
720 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
721 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
722 break;
723 } else {
724 goto dtx_cfg;
725 }
726 }
727
728 /* Tx DMA Initialization */
729 val64 = 0;
730 writeq(val64, &bar0->tx_fifo_partition_0);
731 writeq(val64, &bar0->tx_fifo_partition_1);
732 writeq(val64, &bar0->tx_fifo_partition_2);
733 writeq(val64, &bar0->tx_fifo_partition_3);
734
735
736 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
737 val64 |=
738 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
739 13) | vBIT(config->tx_cfg[i].fifo_priority,
740 ((i * 32) + 5), 3);
741
742 if (i == (config->tx_fifo_num - 1)) {
743 if (i % 2 == 0)
744 i++;
745 }
746
747 switch (i) {
748 case 1:
749 writeq(val64, &bar0->tx_fifo_partition_0);
750 val64 = 0;
751 break;
752 case 3:
753 writeq(val64, &bar0->tx_fifo_partition_1);
754 val64 = 0;
755 break;
756 case 5:
757 writeq(val64, &bar0->tx_fifo_partition_2);
758 val64 = 0;
759 break;
760 case 7:
761 writeq(val64, &bar0->tx_fifo_partition_3);
762 break;
763 }
764 }
765
766 /* Enable Tx FIFO partition 0. */
767 val64 = readq(&bar0->tx_fifo_partition_0);
768 val64 |= BIT(0); /* To enable the FIFO partition. */
769 writeq(val64, &bar0->tx_fifo_partition_0);
770
5e25b9dd
K
771 /*
772 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
773 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
774 */
775 if (get_xena_rev_id(nic->pdev) < 4)
776 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
777
1da177e4
LT
778 val64 = readq(&bar0->tx_fifo_partition_0);
779 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
780 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
781
20346722
K
782 /*
783 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
784 * integrity checking.
785 */
786 val64 = readq(&bar0->tx_pa_cfg);
787 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
788 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
789 writeq(val64, &bar0->tx_pa_cfg);
790
	/* Rx DMA initialization. */
792 val64 = 0;
793 for (i = 0; i < config->rx_ring_num; i++) {
794 val64 |=
795 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
796 3);
797 }
798 writeq(val64, &bar0->rx_queue_priority);
799
20346722
K
800 /*
801 * Allocating equal share of memory to all the
1da177e4
LT
802 * configured Rings.
803 */
804 val64 = 0;
20346722 805 mem_size = 64;
1da177e4
LT
806 for (i = 0; i < config->rx_ring_num; i++) {
807 switch (i) {
808 case 0:
20346722
K
809 mem_share = (mem_size / config->rx_ring_num +
810 mem_size % config->rx_ring_num);
1da177e4
LT
811 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
812 continue;
813 case 1:
20346722 814 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
815 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
816 continue;
817 case 2:
20346722 818 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
819 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
820 continue;
821 case 3:
20346722 822 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
823 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
824 continue;
825 case 4:
20346722 826 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
827 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
828 continue;
829 case 5:
20346722 830 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
831 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
832 continue;
833 case 6:
20346722 834 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
835 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
836 continue;
837 case 7:
20346722 838 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
839 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
840 continue;
841 }
842 }
843 writeq(val64, &bar0->rx_queue_cfg);
844
20346722 845 /*
5e25b9dd
K
846 * Filling Tx round robin registers
847 * as per the number of FIFOs
1da177e4 848 */
5e25b9dd
K
849 switch (config->tx_fifo_num) {
850 case 1:
851 val64 = 0x0000000000000000ULL;
852 writeq(val64, &bar0->tx_w_round_robin_0);
853 writeq(val64, &bar0->tx_w_round_robin_1);
854 writeq(val64, &bar0->tx_w_round_robin_2);
855 writeq(val64, &bar0->tx_w_round_robin_3);
856 writeq(val64, &bar0->tx_w_round_robin_4);
857 break;
858 case 2:
859 val64 = 0x0000010000010000ULL;
860 writeq(val64, &bar0->tx_w_round_robin_0);
861 val64 = 0x0100000100000100ULL;
862 writeq(val64, &bar0->tx_w_round_robin_1);
863 val64 = 0x0001000001000001ULL;
864 writeq(val64, &bar0->tx_w_round_robin_2);
865 val64 = 0x0000010000010000ULL;
866 writeq(val64, &bar0->tx_w_round_robin_3);
867 val64 = 0x0100000000000000ULL;
868 writeq(val64, &bar0->tx_w_round_robin_4);
869 break;
870 case 3:
871 val64 = 0x0001000102000001ULL;
872 writeq(val64, &bar0->tx_w_round_robin_0);
873 val64 = 0x0001020000010001ULL;
874 writeq(val64, &bar0->tx_w_round_robin_1);
875 val64 = 0x0200000100010200ULL;
876 writeq(val64, &bar0->tx_w_round_robin_2);
877 val64 = 0x0001000102000001ULL;
878 writeq(val64, &bar0->tx_w_round_robin_3);
879 val64 = 0x0001020000000000ULL;
880 writeq(val64, &bar0->tx_w_round_robin_4);
881 break;
882 case 4:
883 val64 = 0x0001020300010200ULL;
884 writeq(val64, &bar0->tx_w_round_robin_0);
885 val64 = 0x0100000102030001ULL;
886 writeq(val64, &bar0->tx_w_round_robin_1);
887 val64 = 0x0200010000010203ULL;
888 writeq(val64, &bar0->tx_w_round_robin_2);
889 val64 = 0x0001020001000001ULL;
890 writeq(val64, &bar0->tx_w_round_robin_3);
891 val64 = 0x0203000100000000ULL;
892 writeq(val64, &bar0->tx_w_round_robin_4);
893 break;
894 case 5:
895 val64 = 0x0001000203000102ULL;
896 writeq(val64, &bar0->tx_w_round_robin_0);
897 val64 = 0x0001020001030004ULL;
898 writeq(val64, &bar0->tx_w_round_robin_1);
899 val64 = 0x0001000203000102ULL;
900 writeq(val64, &bar0->tx_w_round_robin_2);
901 val64 = 0x0001020001030004ULL;
902 writeq(val64, &bar0->tx_w_round_robin_3);
903 val64 = 0x0001000000000000ULL;
904 writeq(val64, &bar0->tx_w_round_robin_4);
905 break;
906 case 6:
907 val64 = 0x0001020304000102ULL;
908 writeq(val64, &bar0->tx_w_round_robin_0);
909 val64 = 0x0304050001020001ULL;
910 writeq(val64, &bar0->tx_w_round_robin_1);
911 val64 = 0x0203000100000102ULL;
912 writeq(val64, &bar0->tx_w_round_robin_2);
913 val64 = 0x0304000102030405ULL;
914 writeq(val64, &bar0->tx_w_round_robin_3);
915 val64 = 0x0001000200000000ULL;
916 writeq(val64, &bar0->tx_w_round_robin_4);
917 break;
918 case 7:
919 val64 = 0x0001020001020300ULL;
920 writeq(val64, &bar0->tx_w_round_robin_0);
921 val64 = 0x0102030400010203ULL;
922 writeq(val64, &bar0->tx_w_round_robin_1);
923 val64 = 0x0405060001020001ULL;
924 writeq(val64, &bar0->tx_w_round_robin_2);
925 val64 = 0x0304050000010200ULL;
926 writeq(val64, &bar0->tx_w_round_robin_3);
927 val64 = 0x0102030000000000ULL;
928 writeq(val64, &bar0->tx_w_round_robin_4);
929 break;
930 case 8:
931 val64 = 0x0001020300040105ULL;
932 writeq(val64, &bar0->tx_w_round_robin_0);
933 val64 = 0x0200030106000204ULL;
934 writeq(val64, &bar0->tx_w_round_robin_1);
935 val64 = 0x0103000502010007ULL;
936 writeq(val64, &bar0->tx_w_round_robin_2);
937 val64 = 0x0304010002060500ULL;
938 writeq(val64, &bar0->tx_w_round_robin_3);
939 val64 = 0x0103020400000000ULL;
940 writeq(val64, &bar0->tx_w_round_robin_4);
941 break;
942 }
943
944 /* Filling the Rx round robin registers as per the
945 * number of Rings and steering based on QoS.
946 */
947 switch (config->rx_ring_num) {
948 case 1:
949 val64 = 0x8080808080808080ULL;
950 writeq(val64, &bar0->rts_qos_steering);
951 break;
952 case 2:
953 val64 = 0x0000010000010000ULL;
954 writeq(val64, &bar0->rx_w_round_robin_0);
955 val64 = 0x0100000100000100ULL;
956 writeq(val64, &bar0->rx_w_round_robin_1);
957 val64 = 0x0001000001000001ULL;
958 writeq(val64, &bar0->rx_w_round_robin_2);
959 val64 = 0x0000010000010000ULL;
960 writeq(val64, &bar0->rx_w_round_robin_3);
961 val64 = 0x0100000000000000ULL;
962 writeq(val64, &bar0->rx_w_round_robin_4);
963
964 val64 = 0x8080808040404040ULL;
965 writeq(val64, &bar0->rts_qos_steering);
966 break;
967 case 3:
968 val64 = 0x0001000102000001ULL;
969 writeq(val64, &bar0->rx_w_round_robin_0);
970 val64 = 0x0001020000010001ULL;
971 writeq(val64, &bar0->rx_w_round_robin_1);
972 val64 = 0x0200000100010200ULL;
973 writeq(val64, &bar0->rx_w_round_robin_2);
974 val64 = 0x0001000102000001ULL;
975 writeq(val64, &bar0->rx_w_round_robin_3);
976 val64 = 0x0001020000000000ULL;
977 writeq(val64, &bar0->rx_w_round_robin_4);
978
979 val64 = 0x8080804040402020ULL;
980 writeq(val64, &bar0->rts_qos_steering);
981 break;
982 case 4:
983 val64 = 0x0001020300010200ULL;
984 writeq(val64, &bar0->rx_w_round_robin_0);
985 val64 = 0x0100000102030001ULL;
986 writeq(val64, &bar0->rx_w_round_robin_1);
987 val64 = 0x0200010000010203ULL;
988 writeq(val64, &bar0->rx_w_round_robin_2);
989 val64 = 0x0001020001000001ULL;
990 writeq(val64, &bar0->rx_w_round_robin_3);
991 val64 = 0x0203000100000000ULL;
992 writeq(val64, &bar0->rx_w_round_robin_4);
993
994 val64 = 0x8080404020201010ULL;
995 writeq(val64, &bar0->rts_qos_steering);
996 break;
997 case 5:
998 val64 = 0x0001000203000102ULL;
999 writeq(val64, &bar0->rx_w_round_robin_0);
1000 val64 = 0x0001020001030004ULL;
1001 writeq(val64, &bar0->rx_w_round_robin_1);
1002 val64 = 0x0001000203000102ULL;
1003 writeq(val64, &bar0->rx_w_round_robin_2);
1004 val64 = 0x0001020001030004ULL;
1005 writeq(val64, &bar0->rx_w_round_robin_3);
1006 val64 = 0x0001000000000000ULL;
1007 writeq(val64, &bar0->rx_w_round_robin_4);
1008
1009 val64 = 0x8080404020201008ULL;
1010 writeq(val64, &bar0->rts_qos_steering);
1011 break;
1012 case 6:
1013 val64 = 0x0001020304000102ULL;
1014 writeq(val64, &bar0->rx_w_round_robin_0);
1015 val64 = 0x0304050001020001ULL;
1016 writeq(val64, &bar0->rx_w_round_robin_1);
1017 val64 = 0x0203000100000102ULL;
1018 writeq(val64, &bar0->rx_w_round_robin_2);
1019 val64 = 0x0304000102030405ULL;
1020 writeq(val64, &bar0->rx_w_round_robin_3);
1021 val64 = 0x0001000200000000ULL;
1022 writeq(val64, &bar0->rx_w_round_robin_4);
1023
1024 val64 = 0x8080404020100804ULL;
1025 writeq(val64, &bar0->rts_qos_steering);
1026 break;
1027 case 7:
1028 val64 = 0x0001020001020300ULL;
1029 writeq(val64, &bar0->rx_w_round_robin_0);
1030 val64 = 0x0102030400010203ULL;
1031 writeq(val64, &bar0->rx_w_round_robin_1);
1032 val64 = 0x0405060001020001ULL;
1033 writeq(val64, &bar0->rx_w_round_robin_2);
1034 val64 = 0x0304050000010200ULL;
1035 writeq(val64, &bar0->rx_w_round_robin_3);
1036 val64 = 0x0102030000000000ULL;
1037 writeq(val64, &bar0->rx_w_round_robin_4);
1038
1039 val64 = 0x8080402010080402ULL;
1040 writeq(val64, &bar0->rts_qos_steering);
1041 break;
1042 case 8:
1043 val64 = 0x0001020300040105ULL;
1044 writeq(val64, &bar0->rx_w_round_robin_0);
1045 val64 = 0x0200030106000204ULL;
1046 writeq(val64, &bar0->rx_w_round_robin_1);
1047 val64 = 0x0103000502010007ULL;
1048 writeq(val64, &bar0->rx_w_round_robin_2);
1049 val64 = 0x0304010002060500ULL;
1050 writeq(val64, &bar0->rx_w_round_robin_3);
1051 val64 = 0x0103020400000000ULL;
1052 writeq(val64, &bar0->rx_w_round_robin_4);
1053
1054 val64 = 0x8040201008040201ULL;
1055 writeq(val64, &bar0->rts_qos_steering);
1056 break;
1057 }
1da177e4
LT
1058
1059 /* UDP Fix */
1060 val64 = 0;
20346722 1061 for (i = 0; i < 8; i++)
1da177e4
LT
1062 writeq(val64, &bar0->rts_frm_len_n[i]);
1063
5e25b9dd
K
1064 /* Set the default rts frame length for the rings configured */
1065 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1066 for (i = 0 ; i < config->rx_ring_num ; i++)
1067 writeq(val64, &bar0->rts_frm_len_n[i]);
1068
1069 /* Set the frame length for the configured rings
1070 * desired by the user
1071 */
1072 for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
1075 * If the user provides the frame length then program
1076 * the rts_frm_len register for those values or else
1077 * leave it as it is.
1078 */
1079 if (rts_frm_len[i] != 0) {
1080 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1081 &bar0->rts_frm_len_n[i]);
1082 }
1083 }
1da177e4 1084
20346722 1085 /* Program statistics memory */
1da177e4 1086 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1087
20346722 1088 /*
1da177e4
LT
1089 * Initializing the sampling rate for the device to calculate the
1090 * bandwidth utilization.
1091 */
1092 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1093 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1094 writeq(val64, &bar0->mac_link_util);
1095
1096
20346722
K
1097 /*
1098 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1099 * Scheme.
1100 */
20346722
K
1101 /*
1102 * TTI Initialization. Default Tx timer gets us about
1da177e4
LT
1103 * 250 interrupts per sec. Continuous interrupts are enabled
1104 * by default.
1105 */
1106 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1107 TTI_DATA1_MEM_TX_URNG_A(0xA) |
1108 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd
K
1109 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1110 if (use_continuous_tx_intrs)
1111 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1da177e4
LT
1112 writeq(val64, &bar0->tti_data1_mem);
1113
1114 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1115 TTI_DATA2_MEM_TX_UFC_B(0x20) |
5e25b9dd 1116 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1da177e4
LT
1117 writeq(val64, &bar0->tti_data2_mem);
1118
1119 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1120 writeq(val64, &bar0->tti_command_mem);
1121
20346722 1122 /*
1da177e4
LT
1123 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
1125 * We wait for a maximum of 500ms for the operation to complete,
1126 * if it's not complete by then we return error.
1127 */
1128 time = 0;
1129 while (TRUE) {
1130 val64 = readq(&bar0->tti_command_mem);
1131 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1132 break;
1133 }
1134 if (time > 10) {
1135 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1136 dev->name);
1137 return -1;
1138 }
1139 msleep(50);
1140 time++;
1141 }
1142
1143 /* RTI Initialization */
1144 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1145 RTI_DATA1_MEM_RX_URNG_A(0xA) |
1146 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1147 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1148
1149 writeq(val64, &bar0->rti_data1_mem);
1150
1151 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1152 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1153 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1154 writeq(val64, &bar0->rti_data2_mem);
1155
1156 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1157 writeq(val64, &bar0->rti_command_mem);
1158
20346722 1159 /*
5e25b9dd
K
1160 * Once the operation completes, the Strobe bit of the
1161 * command register will be reset. We poll for this
1162 * particular condition. We wait for a maximum of 500ms
1163 * for the operation to complete, if it's not complete
1164 * by then we return error.
1da177e4
LT
1165 */
1166 time = 0;
1167 while (TRUE) {
1168 val64 = readq(&bar0->rti_command_mem);
20346722 1169 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1da177e4
LT
1170 break;
1171 }
1172 if (time > 10) {
1173 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1174 dev->name);
1175 return -1;
1176 }
1177 time++;
1178 msleep(50);
1179 }
1180
20346722
K
1181 /*
1182 * Initializing proper values as Pause threshold into all
1da177e4
LT
1183 * the 8 Queues on Rx side.
1184 */
1185 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1186 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1187
1188 /* Disable RMAC PAD STRIPPING */
20346722 1189 add = (void *) &bar0->mac_cfg;
1da177e4
LT
1190 val64 = readq(&bar0->mac_cfg);
1191 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1192 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1193 writel((u32) (val64), add);
1194 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1195 writel((u32) (val64 >> 32), (add + 4));
1196 val64 = readq(&bar0->mac_cfg);
1197
20346722
K
1198 /*
1199 * Set the time value to be inserted in the pause frame
1da177e4
LT
1200 * generated by xena.
1201 */
1202 val64 = readq(&bar0->rmac_pause_cfg);
1203 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1204 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1205 writeq(val64, &bar0->rmac_pause_cfg);
1206
20346722 1207 /*
1da177e4
LT
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
1212 */
1213 val64 = 0;
1214 for (i = 0; i < 4; i++) {
1215 val64 |=
1216 (((u64) 0xFF00 | nic->mac_control.
1217 mc_pause_threshold_q0q3)
1218 << (i * 2 * 8));
1219 }
1220 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1221
1222 val64 = 0;
1223 for (i = 0; i < 4; i++) {
1224 val64 |=
1225 (((u64) 0xFF00 | nic->mac_control.
1226 mc_pause_threshold_q4q7)
1227 << (i * 2 * 8));
1228 }
1229 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1230
20346722
K
1231 /*
	 * TxDMA will stop Read requests if the number of read splits has
1da177e4
LT
	 * exceeded the limit pointed to by shared_splits
1234 */
1235 val64 = readq(&bar0->pic_control);
1236 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1237 writeq(val64, &bar0->pic_control);
1238
1239 return SUCCESS;
1240}
1241
20346722
K
1242/**
1243 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
1244 * @nic: device private variable,
1245 * @mask: A mask indicating which Intr block must be modified and,
1246 * @flag: A flag indicating whether to enable or disable the Intrs.
1247 * Description: This function will either disable or enable the interrupts
20346722
K
1248 * depending on the flag argument. The mask argument can be used to
1249 * enable/disable any Intr block.
1da177e4
LT
1250 * Return Value: NONE.
1251 */
1252
1253static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1254{
1255 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1256 register u64 val64 = 0, temp64 = 0;
1257
1258 /* Top level interrupt classification */
1259 /* PIC Interrupts */
1260 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1261 /* Enable PIC Intrs in the general intr mask register */
1262 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1263 if (flag == ENABLE_INTRS) {
1264 temp64 = readq(&bar0->general_int_mask);
1265 temp64 &= ~((u64) val64);
1266 writeq(temp64, &bar0->general_int_mask);
20346722 1267 /*
1da177e4 1268 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
1269 * interrupts for now.
1270 * TODO
1da177e4
LT
1271 */
1272 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
20346722 1273 /*
1da177e4
LT
1274 * No MSI Support is available presently, so TTI and
1275 * RTI interrupts are also disabled.
1276 */
1277 } else if (flag == DISABLE_INTRS) {
20346722
K
1278 /*
1279 * Disable PIC Intrs in the general
1280 * intr mask register
1da177e4
LT
1281 */
1282 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1283 temp64 = readq(&bar0->general_int_mask);
1284 val64 |= temp64;
1285 writeq(val64, &bar0->general_int_mask);
1286 }
1287 }
1288
1289 /* DMA Interrupts */
1290 /* Enabling/Disabling Tx DMA interrupts */
1291 if (mask & TX_DMA_INTR) {
1292 /* Enable TxDMA Intrs in the general intr mask register */
1293 val64 = TXDMA_INT_M;
1294 if (flag == ENABLE_INTRS) {
1295 temp64 = readq(&bar0->general_int_mask);
1296 temp64 &= ~((u64) val64);
1297 writeq(temp64, &bar0->general_int_mask);
20346722
K
1298 /*
1299 * Keep all interrupts other than PFC interrupt
1da177e4
LT
1300 * and PCC interrupt disabled in DMA level.
1301 */
1302 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1303 TXDMA_PCC_INT_M);
1304 writeq(val64, &bar0->txdma_int_mask);
20346722
K
1305 /*
1306 * Enable only the MISC error 1 interrupt in PFC block
1da177e4
LT
1307 */
1308 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1309 writeq(val64, &bar0->pfc_err_mask);
20346722
K
1310 /*
1311 * Enable only the FB_ECC error interrupt in PCC block
1da177e4
LT
1312 */
1313 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1314 writeq(val64, &bar0->pcc_err_mask);
1315 } else if (flag == DISABLE_INTRS) {
20346722
K
1316 /*
1317 * Disable TxDMA Intrs in the general intr mask
1318 * register
1da177e4
LT
1319 */
1320 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1321 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1322 temp64 = readq(&bar0->general_int_mask);
1323 val64 |= temp64;
1324 writeq(val64, &bar0->general_int_mask);
1325 }
1326 }
1327
1328 /* Enabling/Disabling Rx DMA interrupts */
1329 if (mask & RX_DMA_INTR) {
1330 /* Enable RxDMA Intrs in the general intr mask register */
1331 val64 = RXDMA_INT_M;
1332 if (flag == ENABLE_INTRS) {
1333 temp64 = readq(&bar0->general_int_mask);
1334 temp64 &= ~((u64) val64);
1335 writeq(temp64, &bar0->general_int_mask);
20346722
K
1336 /*
1337 * All RxDMA block interrupts are disabled for now
1338 * TODO
1da177e4
LT
1339 */
1340 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1341 } else if (flag == DISABLE_INTRS) {
20346722
K
1342 /*
1343 * Disable RxDMA Intrs in the general intr mask
1344 * register
1da177e4
LT
1345 */
1346 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1347 temp64 = readq(&bar0->general_int_mask);
1348 val64 |= temp64;
1349 writeq(val64, &bar0->general_int_mask);
1350 }
1351 }
1352
1353 /* MAC Interrupts */
1354 /* Enabling/Disabling MAC interrupts */
1355 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1356 val64 = TXMAC_INT_M | RXMAC_INT_M;
1357 if (flag == ENABLE_INTRS) {
1358 temp64 = readq(&bar0->general_int_mask);
1359 temp64 &= ~((u64) val64);
1360 writeq(temp64, &bar0->general_int_mask);
20346722
K
1361 /*
1362 * All MAC block error interrupts are disabled for now
1da177e4
LT
1363 * except the link status change interrupt.
1364 * TODO
1365 */
1366 val64 = MAC_INT_STATUS_RMAC_INT;
1367 temp64 = readq(&bar0->mac_int_mask);
1368 temp64 &= ~((u64) val64);
1369 writeq(temp64, &bar0->mac_int_mask);
1370
1371 val64 = readq(&bar0->mac_rmac_err_mask);
1372 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1373 writeq(val64, &bar0->mac_rmac_err_mask);
1374 } else if (flag == DISABLE_INTRS) {
20346722
K
1375 /*
1376 * Disable MAC Intrs in the general intr mask register
1da177e4
LT
1377 */
1378 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1379 writeq(DISABLE_ALL_INTRS,
1380 &bar0->mac_rmac_err_mask);
1381
1382 temp64 = readq(&bar0->general_int_mask);
1383 val64 |= temp64;
1384 writeq(val64, &bar0->general_int_mask);
1385 }
1386 }
1387
1388 /* XGXS Interrupts */
1389 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1390 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1391 if (flag == ENABLE_INTRS) {
1392 temp64 = readq(&bar0->general_int_mask);
1393 temp64 &= ~((u64) val64);
1394 writeq(temp64, &bar0->general_int_mask);
20346722 1395 /*
1da177e4 1396 * All XGXS block error interrupts are disabled for now
20346722 1397 * TODO
1da177e4
LT
1398 */
1399 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1400 } else if (flag == DISABLE_INTRS) {
20346722
K
1401 /*
			 * Disable XGXS Intrs in the general intr mask register
1da177e4
LT
1403 */
1404 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1405 temp64 = readq(&bar0->general_int_mask);
1406 val64 |= temp64;
1407 writeq(val64, &bar0->general_int_mask);
1408 }
1409 }
1410
1411 /* Memory Controller(MC) interrupts */
1412 if (mask & MC_INTR) {
1413 val64 = MC_INT_M;
1414 if (flag == ENABLE_INTRS) {
1415 temp64 = readq(&bar0->general_int_mask);
1416 temp64 &= ~((u64) val64);
1417 writeq(temp64, &bar0->general_int_mask);
20346722 1418 /*
5e25b9dd 1419 * Enable all MC Intrs.
1da177e4 1420 */
5e25b9dd
K
1421 writeq(0x0, &bar0->mc_int_mask);
1422 writeq(0x0, &bar0->mc_err_mask);
1da177e4
LT
1423 } else if (flag == DISABLE_INTRS) {
1424 /*
1425 * Disable MC Intrs in the general intr mask register
1426 */
1427 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1428 temp64 = readq(&bar0->general_int_mask);
1429 val64 |= temp64;
1430 writeq(val64, &bar0->general_int_mask);
1431 }
1432 }
1433
1434
1435 /* Tx traffic interrupts */
1436 if (mask & TX_TRAFFIC_INTR) {
1437 val64 = TXTRAFFIC_INT_M;
1438 if (flag == ENABLE_INTRS) {
1439 temp64 = readq(&bar0->general_int_mask);
1440 temp64 &= ~((u64) val64);
1441 writeq(temp64, &bar0->general_int_mask);
20346722 1442 /*
1da177e4 1443 * Enable all the Tx side interrupts
20346722 1444 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
1445 */
1446 writeq(0x0, &bar0->tx_traffic_mask);
1447 } else if (flag == DISABLE_INTRS) {
20346722
K
1448 /*
1449 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
1450 * register.
1451 */
1452 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1453 temp64 = readq(&bar0->general_int_mask);
1454 val64 |= temp64;
1455 writeq(val64, &bar0->general_int_mask);
1456 }
1457 }
1458
1459 /* Rx traffic interrupts */
1460 if (mask & RX_TRAFFIC_INTR) {
1461 val64 = RXTRAFFIC_INT_M;
1462 if (flag == ENABLE_INTRS) {
1463 temp64 = readq(&bar0->general_int_mask);
1464 temp64 &= ~((u64) val64);
1465 writeq(temp64, &bar0->general_int_mask);
1466 /* writing 0 Enables all 8 RX interrupt levels */
1467 writeq(0x0, &bar0->rx_traffic_mask);
1468 } else if (flag == DISABLE_INTRS) {
20346722
K
1469 /*
1470 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
1471 * register.
1472 */
1473 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1474 temp64 = readq(&bar0->general_int_mask);
1475 val64 |= temp64;
1476 writeq(val64, &bar0->general_int_mask);
1477 }
1478 }
1479}
1480
5e25b9dd 1481static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
20346722
K
1482{
1483 int ret = 0;
1484
1485 if (flag == FALSE) {
5e25b9dd
K
1486 if (rev_id >= 4) {
1487 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1488 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1489 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1490 ret = 1;
1491 }
1492 } else {
1493 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1494 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1495 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1496 ret = 1;
1497 }
20346722
K
1498 }
1499 } else {
5e25b9dd
K
1500 if (rev_id >= 4) {
1501 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1502 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1503 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1504 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1505 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1506 ret = 1;
1507 }
1508 } else {
1509 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1510 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1511 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1512 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1513 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1514 ret = 1;
1515 }
20346722
K
1516 }
1517 }
1518
1519 return ret;
1520}
1521/**
1522 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4
LT
1523 * @val64 : Value read from adapter status register.
1524 * @flag : indicates if the adapter enable bit was ever written once
1525 * before.
1526 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1527 * on whether adapter enable bit was written or not the comparison
1da177e4
LT
1528 * differs and the calling function passes the input argument flag to
1529 * indicate this.
 * Return: 1 if Xena is quiescent
 *	   0 if Xena is not quiescent
1532 */
1533
20346722 1534static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4
LT
1535{
1536 int ret = 0;
1537 u64 tmp64 = ~((u64) val64);
5e25b9dd 1538 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4
LT
1539
1540 if (!
1541 (tmp64 &
1542 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1543 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1544 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1545 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1546 ADAPTER_STATUS_P_PLL_LOCK))) {
5e25b9dd 1547 ret = check_prc_pcc_state(val64, flag, rev_id);
1da177e4
LT
1548 }
1549
1550 return ret;
1551}
1552
1553/**
1554 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
20346722 1556 * Description :
1da177e4
LT
1557 * New procedure to clear mac address reading problems on Alpha platforms
1558 *
1559 */
1560
20346722 1561void fix_mac_address(nic_t * sp)
1da177e4
LT
1562{
1563 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1564 u64 val64;
1565 int i = 0;
1566
1567 while (fix_mac[i] != END_SIGN) {
1568 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1569 udelay(10);
1da177e4
LT
1570 val64 = readq(&bar0->gpio_control);
1571 }
1572}
1573
1574/**
20346722 1575 * start_nic - Turns the device on
1da177e4 1576 * @nic : device private variable.
20346722
K
1577 * Description:
1578 * This function actually turns the device on. Before this function is
 * called, all registers are configured from their reset states
1580 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1581 * calling this function, the device interrupts are cleared and the NIC is
1582 * literally switched on by writing into the adapter control register.
20346722 1583 * Return Value:
1da177e4
LT
1584 * SUCCESS on success and -1 on failure.
1585 */
1586
1587static int start_nic(struct s2io_nic *nic)
1588{
1589 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1590 struct net_device *dev = nic->dev;
1591 register u64 val64 = 0;
20346722
K
1592 u16 interruptible;
1593 u16 subid, i;
1da177e4
LT
1594 mac_info_t *mac_control;
1595 struct config_param *config;
1596
1597 mac_control = &nic->mac_control;
1598 config = &nic->config;
1599
1600 /* PRC Initialization and configuration */
1601 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1602 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1603 &bar0->prc_rxd0_n[i]);
1604
1605 val64 = readq(&bar0->prc_ctrl_n[i]);
1606#ifndef CONFIG_2BUFF_MODE
1607 val64 |= PRC_CTRL_RC_ENABLED;
1608#else
1609 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1610#endif
1611 writeq(val64, &bar0->prc_ctrl_n[i]);
1612 }
1613
1614#ifdef CONFIG_2BUFF_MODE
1615 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1616 val64 = readq(&bar0->rx_pa_cfg);
1617 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1618 writeq(val64, &bar0->rx_pa_cfg);
1619#endif
1620
20346722 1621 /*
1da177e4
LT
1622 * Enabling MC-RLDRAM. After enabling the device, we timeout
1623 * for around 100ms, which is approximately the time required
1624 * for the device to be ready for operation.
1625 */
1626 val64 = readq(&bar0->mc_rldram_mrs);
1627 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1628 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1629 val64 = readq(&bar0->mc_rldram_mrs);
1630
20346722 1631 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
1632
1633 /* Enabling ECC Protection. */
1634 val64 = readq(&bar0->adapter_control);
1635 val64 &= ~ADAPTER_ECC_EN;
1636 writeq(val64, &bar0->adapter_control);
1637
20346722
K
1638 /*
1639 * Clearing any possible Link state change interrupts that
1da177e4
LT
1640 * could have popped up just before Enabling the card.
1641 */
1642 val64 = readq(&bar0->mac_rmac_err_reg);
1643 if (val64)
1644 writeq(val64, &bar0->mac_rmac_err_reg);
1645
20346722
K
1646 /*
1647 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
1648 * it.
1649 */
1650 val64 = readq(&bar0->adapter_status);
20346722 1651 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
1652 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1653 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1654 (unsigned long long) val64);
1655 return FAILURE;
1656 }
1657
1658 /* Enable select interrupts */
1659 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
5e25b9dd 1660 RX_MAC_INTR | MC_INTR;
1da177e4
LT
1661 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1662
20346722 1663 /*
1da177e4 1664 * With some switches, link might be already up at this point.
20346722
K
1665 * Because of this weird behavior, when we enable laser,
1666 * we may not get link. We need to handle this. We cannot
1667 * figure out which switch is misbehaving. So we are forced to
1668 * make a global change.
1da177e4
LT
1669 */
1670
1671 /* Enabling Laser. */
1672 val64 = readq(&bar0->adapter_control);
1673 val64 |= ADAPTER_EOI_TX_ON;
1674 writeq(val64, &bar0->adapter_control);
1675
1676 /* SXE-002: Initialize link and activity LED */
1677 subid = nic->pdev->subsystem_device;
1678 if ((subid & 0xFF) >= 0x07) {
1679 val64 = readq(&bar0->gpio_control);
1680 val64 |= 0x0000800000000000ULL;
1681 writeq(val64, &bar0->gpio_control);
1682 val64 = 0x0411040400000000ULL;
1683 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1684 }
1685
1686 /*
1687 * We don't see link state interrupts with certain switches, so
1688 * directly schedule a link state task from here.
1689 */
1690 schedule_work(&nic->set_link_task);
1691
1692 return SUCCESS;
1693}
1694
1695/**
1696 * free_tx_buffers - Free all queued Tx buffers
1697 * @nic : device private variable.
1698 * Description:
1699 * Free all queued Tx buffers.
1700 * Return Value: void
1701 */
1702
1703static void free_tx_buffers(struct s2io_nic *nic)
1704{
1705 struct net_device *dev = nic->dev;
1706 struct sk_buff *skb;
1707 TxD_t *txdp;
1708 int i, j;
1709 mac_info_t *mac_control;
1710 struct config_param *config;
1711 int cnt = 0, frg_cnt;
1712
1713 mac_control = &nic->mac_control;
1714 config = &nic->config;
1715
1716 for (i = 0; i < config->tx_fifo_num; i++) {
1717 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1718 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1719 list_virt_addr;
1720 skb =
1721 (struct sk_buff *) ((unsigned long) txdp->
1722 Host_Control);
1723 if (skb == NULL) {
1724 memset(txdp, 0, sizeof(TxD_t) *
1725 config->max_txds);
1726 continue;
1727 }
1728 frg_cnt = skb_shinfo(skb)->nr_frags;
1729 pci_unmap_single(nic->pdev, (dma_addr_t)
1730 txdp->Buffer_Pointer,
1731 skb->len - skb->data_len,
1732 PCI_DMA_TODEVICE);
1733 if (frg_cnt) {
1734 TxD_t *temp;
1735 temp = txdp;
1736 txdp++;
1737 for (j = 0; j < frg_cnt; j++, txdp++) {
1738 skb_frag_t *frag =
1739 &skb_shinfo(skb)->frags[j];
1740 pci_unmap_page(nic->pdev,
1741 (dma_addr_t)
1742 txdp->
1743 Buffer_Pointer,
1744 frag->size,
1745 PCI_DMA_TODEVICE);
1746 }
1747 txdp = temp;
1748 }
1749 dev_kfree_skb(skb);
1750 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1751 cnt++;
1752 }
1753 DBG_PRINT(INTR_DBG,
1754 "%s:forcibly freeing %d skbs on FIFO%d\n",
1755 dev->name, cnt, i);
1756 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1757 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1758 }
1759}
1760
1761/**
1762 * stop_nic - To stop the nic
1763 * @nic : device private variable.
1764 * Description:
1765 * This function does exactly the opposite of what the start_nic()
1766 * function does. This function is called to stop the device.
1767 * Return Value:
1768 * void.
1769 */
1770
1771static void stop_nic(struct s2io_nic *nic)
1772{
1773 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1774 register u64 val64 = 0;
1775 u16 interruptible, i;
1776 mac_info_t *mac_control;
1777 struct config_param *config;
1778
1779 mac_control = &nic->mac_control;
1780 config = &nic->config;
1781
1782 /* Disable all interrupts */
1783 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1784 RX_MAC_INTR | MC_INTR;
1785 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1786
1787 /* Disable PRCs */
1788 for (i = 0; i < config->rx_ring_num; i++) {
1789 val64 = readq(&bar0->prc_ctrl_n[i]);
1790 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1791 writeq(val64, &bar0->prc_ctrl_n[i]);
1792 }
1793}
1794
1795/**
1796 * fill_rx_buffers - Allocates the Rx side skbs
1797 * @nic: device private variable
1798 * @ring_no: ring number
1799 * Description:
1800 * The function allocates Rx side skbs and puts the physical
1801 * address of these buffers into the RxD buffer pointers, so that the NIC
1802 * can DMA the received frame into these locations.
1803 * The NIC supports three receive modes, viz.
1804 * 1. single buffer,
1805 * 2. three buffer and
1806 * 3. five buffer modes.
1807 * Each mode defines how many fragments the received frame will be split
1808 * up into by the NIC. The frame is split into L3 header, L4 header and
1809 * L4 payload in three buffer mode, and in five buffer mode the L4 payload
1810 * itself is split into 3 fragments. As of now only single buffer mode is
1811 * supported.
1812 * Return Value:
1813 * SUCCESS on success or an appropriate -ve value on failure.
1814 */
1815
1816 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1817{
1818 struct net_device *dev = nic->dev;
1819 struct sk_buff *skb;
1820 RxD_t *rxdp;
1821 int off, off1, size, block_no, block_no1;
1822 int offset, offset1;
1823 u32 alloc_tab = 0;
1824 u32 alloc_cnt;
1825 mac_info_t *mac_control;
1826 struct config_param *config;
1827#ifdef CONFIG_2BUFF_MODE
1828 RxD_t *rxdpnext;
1829 int nextblk;
1830 u64 tmp;
1831 buffAdd_t *ba;
1832 dma_addr_t rxdpphys;
1833#endif
1834#ifndef CONFIG_S2IO_NAPI
1835 unsigned long flags;
1836#endif
1837
1838 mac_control = &nic->mac_control;
1839 config = &nic->config;
1840 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1841 atomic_read(&nic->rx_bufs_left[ring_no]);
1842 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1843 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1844
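 /*
  * Worst-case receive buffer size: the MTU plus the Ethernet II/802.3,
  * 802.2 LLC and SNAP headers, so a maximally sized frame fits into a
  * single buffer in one-buffer mode.
  */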
1845 while (alloc_tab < alloc_cnt) {
1846 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1847 block_index;
1848 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1849 block_index;
1850 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1851 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1852#ifndef CONFIG_2BUFF_MODE
1853 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1854 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
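 /*
  * Each block holds MAX_RXDS_PER_BLOCK usable RxDs plus one terminating
  * descriptor (marked END_OF_BLOCK) that links to the next block, hence
  * the "+ 1" stride when flattening (block, offset) into a ring position.
  */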
1855#else
1856 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1857 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1858#endif
1859
1860 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1861 block_virt_addr + off;
1862 if ((offset == offset1) && (rxdp->Host_Control)) {
1863 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1864 DBG_PRINT(INTR_DBG, " info equated\n");
1865 goto end;
1866 }
1867#ifndef CONFIG_2BUFF_MODE
1868 if (rxdp->Control_1 == END_OF_BLOCK) {
1869 mac_control->rings[ring_no].rx_curr_put_info.
1870 block_index++;
1871 mac_control->rings[ring_no].rx_curr_put_info.
1872 block_index %= mac_control->rings[ring_no].block_count;
1873 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1874 block_index;
1875 off++;
1876 off %= (MAX_RXDS_PER_BLOCK + 1);
1877 mac_control->rings[ring_no].rx_curr_put_info.offset =
1878 off;
1879 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1880 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1881 dev->name, rxdp);
1882 }
1883#ifndef CONFIG_S2IO_NAPI
1884 spin_lock_irqsave(&nic->put_lock, flags);
1885 mac_control->rings[ring_no].put_pos =
1886 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1887 spin_unlock_irqrestore(&nic->put_lock, flags);
1888#endif
1889#else
1890 if (rxdp->Host_Control == END_OF_BLOCK) {
1891 mac_control->rings[ring_no].rx_curr_put_info.
1892 block_index++;
1893 mac_control->rings[ring_no].rx_curr_put_info.block_index
1894 %= mac_control->rings[ring_no].block_count;
1895 block_no = mac_control->rings[ring_no].rx_curr_put_info
1896 .block_index;
1897 off = 0;
1898 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1899 dev->name, block_no,
1900 (unsigned long long) rxdp->Control_1);
1901 mac_control->rings[ring_no].rx_curr_put_info.offset =
1902 off;
1903 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1904 block_virt_addr;
1905 }
1906#ifndef CONFIG_S2IO_NAPI
1907 spin_lock_irqsave(&nic->put_lock, flags);
1908 mac_control->rings[ring_no].put_pos = (block_no *
1909 (MAX_RXDS_PER_BLOCK + 1)) + off;
1910 spin_unlock_irqrestore(&nic->put_lock, flags);
1911#endif
1912#endif
1913
1914#ifndef CONFIG_2BUFF_MODE
1915 if (rxdp->Control_1 & RXD_OWN_XENA)
1916#else
1917 if (rxdp->Control_2 & BIT(0))
1918#endif
1919 {
1920 mac_control->rings[ring_no].rx_curr_put_info.
1921 offset = off;
1922 goto end;
1923 }
1924#ifdef CONFIG_2BUFF_MODE
1925 /*
1926 * RxDs spanning cache lines will be replenished only
1927 * if the succeeding RxD is also owned by the host. It
1928 * will always be the ((8*i)+3) and ((8*i)+6)
1929 * descriptors for the 48 byte descriptor. The offending
1930 * descriptor is of course the 3rd descriptor.
1931 */
1932 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1933 block_dma_addr + (off * sizeof(RxD_t));
1934 if (((u64) (rxdpphys)) % 128 > 80) {
1935 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1936 block_virt_addr + (off + 1);
1937 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1938 nextblk = (block_no + 1) %
1939 (mac_control->rings[ring_no].block_count);
1940 rxdpnext = mac_control->rings[ring_no].rx_blocks
1941 [nextblk].block_virt_addr;
1942 }
1943 if (rxdpnext->Control_2 & BIT(0))
1944 goto end;
1945 }
1946#endif
1947
1948#ifndef CONFIG_2BUFF_MODE
1949 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1950#else
1951 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1952#endif
1953 if (!skb) {
1954 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1955 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1956 return -ENOMEM;
1957 }
1958#ifndef CONFIG_2BUFF_MODE
1959 skb_reserve(skb, NET_IP_ALIGN);
1960 memset(rxdp, 0, sizeof(RxD_t));
1961 rxdp->Buffer0_ptr = pci_map_single
1962 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1963 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1964 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1965 rxdp->Host_Control = (unsigned long) (skb);
1966 rxdp->Control_1 |= RXD_OWN_XENA;
1967 off++;
1968 off %= (MAX_RXDS_PER_BLOCK + 1);
1969 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1970#else
1971 ba = &mac_control->rings[ring_no].ba[block_no][off];
1972 skb_reserve(skb, BUF0_LEN);
1973 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1974 if (tmp)
1975 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1976
1977 memset(rxdp, 0, sizeof(RxD_t));
1978 rxdp->Buffer2_ptr = pci_map_single
1979 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1980 PCI_DMA_FROMDEVICE);
1981 rxdp->Buffer0_ptr =
1982 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1983 PCI_DMA_FROMDEVICE);
1984 rxdp->Buffer1_ptr =
1985 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1986 PCI_DMA_FROMDEVICE);
1987
1988 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1989 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1990 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1991 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1992 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1993 rxdp->Control_1 |= RXD_OWN_XENA;
1994 off++;
1995 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1996#endif
1997 rxdp->Control_2 |= SET_RXD_MARKER;
1998
1999 atomic_inc(&nic->rx_bufs_left[ring_no]);
2000 alloc_tab++;
2001 }
2002
2003 end:
2004 return SUCCESS;
2005}
2006
2007/**
2008 * free_rx_buffers - Frees all Rx buffers
2009 * @sp: device private variable.
2010 * Description:
2011 * This function will free all Rx buffers allocated by host.
2012 * Return Value:
2013 * NONE.
2014 */
2015
2016static void free_rx_buffers(struct s2io_nic *sp)
2017{
2018 struct net_device *dev = sp->dev;
2019 int i, j, blk = 0, off, buf_cnt = 0;
2020 RxD_t *rxdp;
2021 struct sk_buff *skb;
2022 mac_info_t *mac_control;
2023 struct config_param *config;
2024#ifdef CONFIG_2BUFF_MODE
2025 buffAdd_t *ba;
2026#endif
2027
2028 mac_control = &sp->mac_control;
2029 config = &sp->config;
2030
2031 for (i = 0; i < config->rx_ring_num; i++) {
2032 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2033 off = j % (MAX_RXDS_PER_BLOCK + 1);
2034 rxdp = mac_control->rings[i].rx_blocks[blk].
2035 block_virt_addr + off;
2036
2037#ifndef CONFIG_2BUFF_MODE
2038 if (rxdp->Control_1 == END_OF_BLOCK) {
2039 rxdp =
2040 (RxD_t *) ((unsigned long) rxdp->
2041 Control_2);
2042 j++;
2043 blk++;
2044 }
2045#else
2046 if (rxdp->Host_Control == END_OF_BLOCK) {
2047 blk++;
2048 continue;
2049 }
2050#endif
2051
2052 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2053 memset(rxdp, 0, sizeof(RxD_t));
2054 continue;
2055 }
2056
2057 skb =
2058 (struct sk_buff *) ((unsigned long) rxdp->
2059 Host_Control);
2060 if (skb) {
2061#ifndef CONFIG_2BUFF_MODE
2062 pci_unmap_single(sp->pdev, (dma_addr_t)
2063 rxdp->Buffer0_ptr,
2064 dev->mtu +
2065 HEADER_ETHERNET_II_802_3_SIZE
2066 + HEADER_802_2_SIZE +
2067 HEADER_SNAP_SIZE,
2068 PCI_DMA_FROMDEVICE);
2069#else
2070 ba = &mac_control->rings[i].ba[blk][off];
2071 pci_unmap_single(sp->pdev, (dma_addr_t)
2072 rxdp->Buffer0_ptr,
2073 BUF0_LEN,
2074 PCI_DMA_FROMDEVICE);
2075 pci_unmap_single(sp->pdev, (dma_addr_t)
2076 rxdp->Buffer1_ptr,
2077 BUF1_LEN,
2078 PCI_DMA_FROMDEVICE);
2079 pci_unmap_single(sp->pdev, (dma_addr_t)
2080 rxdp->Buffer2_ptr,
2081 dev->mtu + BUF0_LEN + 4,
2082 PCI_DMA_FROMDEVICE);
2083#endif
2084 dev_kfree_skb(skb);
2085 atomic_dec(&sp->rx_bufs_left[i]);
2086 buf_cnt++;
2087 }
2088 memset(rxdp, 0, sizeof(RxD_t));
2089 }
2090 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2091 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2092 mac_control->rings[i].rx_curr_put_info.offset = 0;
2093 mac_control->rings[i].rx_curr_get_info.offset = 0;
2094 atomic_set(&sp->rx_bufs_left[i], 0);
2095 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2096 dev->name, buf_cnt, i);
2097 }
2098}
2099
2100/**
2101 * s2io_poll - Rx interrupt handler for NAPI support
2102 * @dev : pointer to the device structure.
2103 * @budget : The number of packets that were budgeted to be processed
2104 * during one pass through the 'Poll' function.
2105 * Description:
2106 * Comes into picture only if NAPI support has been incorporated. It does
2107 * the same thing that rx_intr_handler does, but not in an interrupt context;
2108 * also it will process only a given number of packets.
2109 * Return value:
2110 * 0 on success and 1 if there are No Rx packets to be processed.
2111 */
2112
2113#if defined(CONFIG_S2IO_NAPI)
2114static int s2io_poll(struct net_device *dev, int *budget)
2115{
2116 nic_t *nic = dev->priv;
2117 int pkt_cnt = 0, org_pkts_to_process;
2118 mac_info_t *mac_control;
2119 struct config_param *config;
2120 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2121 u64 val64;
2122 int i;
2123
2124 atomic_inc(&nic->isr_cnt);
2125 mac_control = &nic->mac_control;
2126 config = &nic->config;
2127
2128 nic->pkts_to_process = *budget;
2129 if (nic->pkts_to_process > dev->quota)
2130 nic->pkts_to_process = dev->quota;
2131 org_pkts_to_process = nic->pkts_to_process;
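 /*
  * Process no more than the smaller of the NAPI *budget and this
  * device's quota; org_pkts_to_process is kept so that the number of
  * packets actually handled can be computed afterwards.
  */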
2132
2133 val64 = readq(&bar0->rx_traffic_int);
2134 writeq(val64, &bar0->rx_traffic_int);
2135
2136 for (i = 0; i < config->rx_ring_num; i++) {
2137 rx_intr_handler(&mac_control->rings[i]);
2138 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2139 if (!nic->pkts_to_process) {
2140 /* Quota for the current iteration has been met */
2141 goto no_rx;
2142 }
2143 }
2144 if (!pkt_cnt)
2145 pkt_cnt = 1;
2146
2147 dev->quota -= pkt_cnt;
2148 *budget -= pkt_cnt;
2149 netif_rx_complete(dev);
2150
2151 for (i = 0; i < config->rx_ring_num; i++) {
2152 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2153 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2154 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2155 break;
2156 }
2157 }
2158 /* Re enable the Rx interrupts. */
2159 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2160 atomic_dec(&nic->isr_cnt);
2161 return 0;
2162
2163no_rx:
2164 dev->quota -= pkt_cnt;
2165 *budget -= pkt_cnt;
2166
2167 for (i = 0; i < config->rx_ring_num; i++) {
2168 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2169 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2170 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2171 break;
2172 }
2173 }
2174 atomic_dec(&nic->isr_cnt);
2175 return 1;
2176}
2177#endif
2178
2179/**
2180 * rx_intr_handler - Rx interrupt handler
2181 * @nic: device private variable.
2182 * Description:
2183 * If the interrupt is because of a received frame or if the
2184 * receive ring contains fresh, as yet unprocessed frames, this function is
2185 * called. It picks out the RxD at which the last Rx processing had
2186 * stopped, sends the skb to the OSM's Rx handler and then increments
2187 * the offset.
2188 * Return Value:
2189 * NONE.
2190 */
2191static void rx_intr_handler(ring_info_t *ring_data)
2192{
2193 nic_t *nic = ring_data->nic;
2194 struct net_device *dev = (struct net_device *) nic->dev;
2195 int get_block, get_offset, put_block, put_offset, ring_bufs;
2196 rx_curr_get_info_t get_info, put_info;
2197 RxD_t *rxdp;
2198 struct sk_buff *skb;
2199#ifndef CONFIG_S2IO_NAPI
2200 int pkt_cnt = 0;
2201#endif
2202 spin_lock(&nic->rx_lock);
2203 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2204 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2205 __FUNCTION__, dev->name);
2206 spin_unlock(&nic->rx_lock);
 return;
2207 }
2208
2209 get_info = ring_data->rx_curr_get_info;
2210 get_block = get_info.block_index;
2211 put_info = ring_data->rx_curr_put_info;
2212 put_block = put_info.block_index;
2213 ring_bufs = get_info.ring_len+1;
2214 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2215 get_info.offset;
2216 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2217 get_info.offset;
2218#ifndef CONFIG_S2IO_NAPI
2219 spin_lock(&nic->put_lock);
2220 put_offset = ring_data->put_pos;
2221 spin_unlock(&nic->put_lock);
2222#else
2223 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2224 put_info.offset;
2225#endif
2226 while (RXD_IS_UP2DT(rxdp) &&
2227 (((get_offset + 1) % ring_bufs) != put_offset)) {
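 /*
  * Walk the ring until we either reach a descriptor that has not yet
  * been handed back by the NIC or catch up with the put (replenish)
  * position.
  */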
2228 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2229 if (skb == NULL) {
2230 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2231 dev->name);
2232 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2233 spin_unlock(&nic->rx_lock);
2234 return;
2235 }
2236#ifndef CONFIG_2BUFF_MODE
2237 pci_unmap_single(nic->pdev, (dma_addr_t)
2238 rxdp->Buffer0_ptr,
2239 dev->mtu +
2240 HEADER_ETHERNET_II_802_3_SIZE +
2241 HEADER_802_2_SIZE +
2242 HEADER_SNAP_SIZE,
2243 PCI_DMA_FROMDEVICE);
2244#else
2245 pci_unmap_single(nic->pdev, (dma_addr_t)
2246 rxdp->Buffer0_ptr,
2247 BUF0_LEN, PCI_DMA_FROMDEVICE);
2248 pci_unmap_single(nic->pdev, (dma_addr_t)
2249 rxdp->Buffer1_ptr,
2250 BUF1_LEN, PCI_DMA_FROMDEVICE);
2251 pci_unmap_single(nic->pdev, (dma_addr_t)
2252 rxdp->Buffer2_ptr,
2253 dev->mtu + BUF0_LEN + 4,
2254 PCI_DMA_FROMDEVICE);
2255#endif
2256 rx_osm_handler(ring_data, rxdp);
2257 get_info.offset++;
2258 ring_data->rx_curr_get_info.offset =
2259 get_info.offset;
2260 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2261 get_info.offset;
2262 if (get_info.offset &&
2263 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2264 get_info.offset = 0;
2265 ring_data->rx_curr_get_info.offset
2266 = get_info.offset;
2267 get_block++;
2268 get_block %= ring_data->block_count;
2269 ring_data->rx_curr_get_info.block_index
2270 = get_block;
2271 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2272 }
2273
2274 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2275 get_info.offset;
2276#ifdef CONFIG_S2IO_NAPI
2277 nic->pkts_to_process -= 1;
2278 if (!nic->pkts_to_process)
2279 break;
2280#else
2281 pkt_cnt++;
2282 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2283 break;
2284#endif
2285 }
2286 spin_unlock(&nic->rx_lock);
2287}
2288
2289/**
2290 * tx_intr_handler - Transmit interrupt handler
2291 * @nic : device private variable
2292 * Description:
2293 * If an interrupt was raised to indicate DMA complete of the
2294 * Tx packet, this function is called. It identifies the last TxD
2295 * whose buffer was freed and frees all skbs whose data have already
2296 * been DMA'ed into the NIC's internal memory.
2297 * Return Value:
2298 * NONE
2299 */
2300
2301static void tx_intr_handler(fifo_info_t *fifo_data)
2302{
2303 nic_t *nic = fifo_data->nic;
2304 struct net_device *dev = (struct net_device *) nic->dev;
2305 tx_curr_get_info_t get_info, put_info;
2306 struct sk_buff *skb;
2307 TxD_t *txdlp;
2308 u16 j, frg_cnt;
2309
2310 get_info = fifo_data->tx_curr_get_info;
2311 put_info = fifo_data->tx_curr_put_info;
2312 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2313 list_virt_addr;
2314 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2315 (get_info.offset != put_info.offset) &&
2316 (txdlp->Host_Control)) {
2317 /* Check for TxD errors */
2318 if (txdlp->Control_1 & TXD_T_CODE) {
2319 unsigned long long err;
2320 err = txdlp->Control_1 & TXD_T_CODE;
2321 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2322 err);
2323 }
2324
2325 skb = (struct sk_buff *) ((unsigned long)
2326 txdlp->Host_Control);
2327 if (skb == NULL) {
2328 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2329 __FUNCTION__);
2330 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2331 return;
2332 }
2333
2334 frg_cnt = skb_shinfo(skb)->nr_frags;
2335 nic->tx_pkt_count++;
2336
2337 pci_unmap_single(nic->pdev, (dma_addr_t)
2338 txdlp->Buffer_Pointer,
2339 skb->len - skb->data_len,
2340 PCI_DMA_TODEVICE);
2341 if (frg_cnt) {
2342 TxD_t *temp;
2343 temp = txdlp;
2344 txdlp++;
2345 for (j = 0; j < frg_cnt; j++, txdlp++) {
2346 skb_frag_t *frag =
2347 &skb_shinfo(skb)->frags[j];
2348 pci_unmap_page(nic->pdev,
2349 (dma_addr_t)
2350 txdlp->
2351 Buffer_Pointer,
2352 frag->size,
2353 PCI_DMA_TODEVICE);
2354 }
2355 txdlp = temp;
2356 }
2357 memset(txdlp, 0,
2358 (sizeof(TxD_t) * fifo_data->max_txds));
2359
2360 /* Updating the statistics block */
2361 nic->stats.tx_bytes += skb->len;
2362 dev_kfree_skb_irq(skb);
2363
2364 get_info.offset++;
2365 get_info.offset %= get_info.fifo_len + 1;
2366 txdlp = (TxD_t *) fifo_data->list_info
2367 [get_info.offset].list_virt_addr;
2368 fifo_data->tx_curr_get_info.offset =
2369 get_info.offset;
2370 }
2371
2372 spin_lock(&nic->tx_lock);
2373 if (netif_queue_stopped(dev))
2374 netif_wake_queue(dev);
2375 spin_unlock(&nic->tx_lock);
2376}
2377
2378/**
2379 * alarm_intr_handler - Alarm Interrupt handler
2380 * @nic: device private variable
2381 * Description: If the interrupt was neither because of an Rx packet nor a
2382 * Tx completion, this function is called. If the interrupt was to indicate
2383 * a loss of link, the OSM link status handler is invoked; for any other
2384 * alarm interrupt the block that raised the interrupt is displayed
2385 * and a H/W reset is issued.
2386 * Return Value:
2387 * NONE
2388*/
2389
2390static void alarm_intr_handler(struct s2io_nic *nic)
2391{
2392 struct net_device *dev = (struct net_device *) nic->dev;
2393 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2394 register u64 val64 = 0, err_reg = 0;
2395
2396 /* Handling link status change error Intr */
2397 err_reg = readq(&bar0->mac_rmac_err_reg);
2398 writeq(err_reg, &bar0->mac_rmac_err_reg);
2399 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2400 schedule_work(&nic->set_link_task);
2401 }
2402
2403 /* Handling Ecc errors */
2404 val64 = readq(&bar0->mc_err_reg);
2405 writeq(val64, &bar0->mc_err_reg);
2406 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2407 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2408 nic->mac_control.stats_info->sw_stat.
2409 double_ecc_errs++;
2410 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2411 dev->name);
2412 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2413 netif_stop_queue(dev);
2414 schedule_work(&nic->rst_timer_task);
2415 } else {
2416 nic->mac_control.stats_info->sw_stat.
2417 single_ecc_errs++;
2418 }
2419 }
2420
2421 /* In case of a serious error, the device will be Reset. */
2422 val64 = readq(&bar0->serr_source);
2423 if (val64 & SERR_SOURCE_ANY) {
2424 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2425 DBG_PRINT(ERR_DBG, "serious error!!\n");
2426 netif_stop_queue(dev);
2427 schedule_work(&nic->rst_timer_task);
2428 }
2429
2430 /*
2431 * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2432 * error occurs, the adapter will be recycled by disabling the
2433 * adapter enable bit and enabling it again after the device
2434 * becomes Quiescent.
2435 */
2436 val64 = readq(&bar0->pcc_err_reg);
2437 writeq(val64, &bar0->pcc_err_reg);
2438 if (val64 & PCC_FB_ECC_DB_ERR) {
2439 u64 ac = readq(&bar0->adapter_control);
2440 ac &= ~(ADAPTER_CNTL_EN);
2441 writeq(ac, &bar0->adapter_control);
2442 ac = readq(&bar0->adapter_control);
2443 schedule_work(&nic->set_link_task);
2444 }
2445
2446 /* Other type of interrupts are not being handled now, TODO */
2447}
2448
2449/**
2450 * wait_for_cmd_complete - waits for a command to complete.
2451 * @sp : private member of the device structure, which is a pointer to the
2452 * s2io_nic structure.
2453 * Description: Function that waits for a command write into the RMAC
2454 * ADDR DATA registers to be completed and returns either success or
2455 * error depending on whether the command completed or not.
2456 * Return value:
2457 * SUCCESS on success and FAILURE on failure.
2458 */
2459
2460int wait_for_cmd_complete(nic_t * sp)
2461{
2462 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2463 int ret = FAILURE, cnt = 0;
2464 u64 val64;
2465
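 /*
  * Poll the strobe bit roughly every 50 ms and give up after about
  * half a second if the command still has not completed.
  */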
2466 while (TRUE) {
2467 val64 = readq(&bar0->rmac_addr_cmd_mem);
2468 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2469 ret = SUCCESS;
2470 break;
2471 }
2472 msleep(50);
2473 if (cnt++ > 10)
2474 break;
2475 }
2476
2477 return ret;
2478}
2479
2480/**
2481 * s2io_reset - Resets the card.
2482 * @sp : private member of the device structure.
2483 * Description: Function to Reset the card. This function then also
2484 * restores the previously saved PCI configuration space registers as
2485 * the card reset also resets the configuration space.
2486 * Return value:
2487 * void.
2488 */
2489
2490void s2io_reset(nic_t * sp)
2491{
2492 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2493 u64 val64;
2494 u16 subid, pci_cmd;
2495
2496 val64 = SW_RESET_ALL;
2497 writeq(val64, &bar0->sw_reset);
2498
2499 /*
2500 * At this stage, if the PCI write is indeed completed, the
2501 * card is reset and so is the PCI Config space of the device.
2502 * So a read cannot be issued at this stage on any of the
1da177e4
LT
2503 * registers to ensure the write into "sw_reset" register
2504 * has gone through.
2505 * Question: Is there any system call that will explicitly force
2506 * all the write commands still pending on the bus to be pushed
2507 * through?
2508 * As of now I am just giving a 250ms delay and hoping that the
2509 * PCI write to sw_reset register is done by this time.
2510 */
2511 msleep(250);
2512
2513 /* Restore the PCI state saved during initialization. */
2514 pci_restore_state(sp->pdev);
2515
2516 s2io_init_pci(sp);
2517
2518 msleep(250);
2519
2520 /* Set swapper to enable I/O register access */
2521 s2io_set_swapper(sp);
2522
2523 /* Clear certain PCI/PCI-X fields after reset */
2524 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2525 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2526 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2527
2528 val64 = readq(&bar0->txpic_int_reg);
2529 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2530 writeq(val64, &bar0->txpic_int_reg);
2531
2532 /* Clearing PCIX Ecc status register */
2533 pci_write_config_dword(sp->pdev, 0x68, 0);
2534
2535 /* Reset device statistics maintained by OS */
2536 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2537
2538 /* SXE-002: Configure link and activity LED to turn it off */
2539 subid = sp->pdev->subsystem_device;
2540 if ((subid & 0xFF) >= 0x07) {
2541 val64 = readq(&bar0->gpio_control);
2542 val64 |= 0x0000800000000000ULL;
2543 writeq(val64, &bar0->gpio_control);
2544 val64 = 0x0411040400000000ULL;
2545 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2546 }
2547
2548 sp->device_enabled_once = FALSE;
2549}
2550
2551/**
2552 * s2io_set_swapper - to set the swapper control on the card
2553 * @sp : private member of the device structure,
2554 * pointer to the s2io_nic structure.
2555 * Description: Function to set the swapper control on the card
2556 * correctly depending on the 'endianness' of the system.
2557 * Return value:
2558 * SUCCESS on success and FAILURE on failure.
2559 */
2560
2561int s2io_set_swapper(nic_t * sp)
2562{
2563 struct net_device *dev = sp->dev;
2564 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2565 u64 val64, valt, valr;
2566
2567 /*
2568 * Set proper endian settings and verify the same by reading
2569 * the PIF Feed-back register.
2570 */
2571
2572 val64 = readq(&bar0->pif_rd_swapper_fb);
2573 if (val64 != 0x0123456789ABCDEFULL) {
2574 int i = 0;
2575 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2576 0x8100008181000081ULL, /* FE=1, SE=0 */
2577 0x4200004242000042ULL, /* FE=0, SE=1 */
2578 0}; /* FE=0, SE=0 */
2579
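 /*
  * Probe the candidate swapper settings (the FE/SE combinations above)
  * until the PIF feedback register returns the expected constant,
  * i.e. until reads from the card come back correctly byte-ordered.
  */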
2580 while(i<4) {
2581 writeq(value[i], &bar0->swapper_ctrl);
2582 val64 = readq(&bar0->pif_rd_swapper_fb);
2583 if (val64 == 0x0123456789ABCDEFULL)
2584 break;
2585 i++;
2586 }
2587 if (i == 4) {
2588 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2589 dev->name);
2590 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2591 (unsigned long long) val64);
2592 return FAILURE;
2593 }
2594 valr = value[i];
2595 } else {
2596 valr = readq(&bar0->swapper_ctrl);
2597 }
2598
2599 valt = 0x0123456789ABCDEFULL;
2600 writeq(valt, &bar0->xmsi_address);
2601 val64 = readq(&bar0->xmsi_address);
2602
2603 if(val64 != valt) {
2604 int i = 0;
2605 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2606 0x0081810000818100ULL, /* FE=1, SE=0 */
2607 0x0042420000424200ULL, /* FE=0, SE=1 */
2608 0}; /* FE=0, SE=0 */
2609
2610 while(i<4) {
2611 writeq((value[i] | valr), &bar0->swapper_ctrl);
2612 writeq(valt, &bar0->xmsi_address);
2613 val64 = readq(&bar0->xmsi_address);
2614 if(val64 == valt)
2615 break;
2616 i++;
2617 }
2618 if(i == 4) {
2619 unsigned long long x = val64;
2620 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2621 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2622 return FAILURE;
2623 }
2624 }
2625 val64 = readq(&bar0->swapper_ctrl);
2626 val64 &= 0xFFFF000000000000ULL;
2627
2628#ifdef __BIG_ENDIAN
2629 /*
2630 * The device is by default set to a big endian format, so a
2631 * big endian driver need not set anything.
2632 */
2633 val64 |= (SWAPPER_CTRL_TXP_FE |
2634 SWAPPER_CTRL_TXP_SE |
2635 SWAPPER_CTRL_TXD_R_FE |
2636 SWAPPER_CTRL_TXD_W_FE |
2637 SWAPPER_CTRL_TXF_R_FE |
2638 SWAPPER_CTRL_RXD_R_FE |
2639 SWAPPER_CTRL_RXD_W_FE |
2640 SWAPPER_CTRL_RXF_W_FE |
2641 SWAPPER_CTRL_XMSI_FE |
2642 SWAPPER_CTRL_XMSI_SE |
2643 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2644 writeq(val64, &bar0->swapper_ctrl);
2645#else
2646 /*
2647 * Initially we enable all bits to make it accessible by the
2648 * driver, then we selectively enable only those bits that
2649 * we want to set.
2650 */
2651 val64 |= (SWAPPER_CTRL_TXP_FE |
2652 SWAPPER_CTRL_TXP_SE |
2653 SWAPPER_CTRL_TXD_R_FE |
2654 SWAPPER_CTRL_TXD_R_SE |
2655 SWAPPER_CTRL_TXD_W_FE |
2656 SWAPPER_CTRL_TXD_W_SE |
2657 SWAPPER_CTRL_TXF_R_FE |
2658 SWAPPER_CTRL_RXD_R_FE |
2659 SWAPPER_CTRL_RXD_R_SE |
2660 SWAPPER_CTRL_RXD_W_FE |
2661 SWAPPER_CTRL_RXD_W_SE |
2662 SWAPPER_CTRL_RXF_W_FE |
2663 SWAPPER_CTRL_XMSI_FE |
2664 SWAPPER_CTRL_XMSI_SE |
2665 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2666 writeq(val64, &bar0->swapper_ctrl);
2667#endif
2668 val64 = readq(&bar0->swapper_ctrl);
2669
2670 /*
2671 * Verifying if endian settings are accurate by reading a
2672 * feedback register.
2673 */
2674 val64 = readq(&bar0->pif_rd_swapper_fb);
2675 if (val64 != 0x0123456789ABCDEFULL) {
2676 /* Endian settings are incorrect, calls for another dekko. */
2677 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2678 dev->name);
2679 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2680 (unsigned long long) val64);
2681 return FAILURE;
2682 }
2683
2684 return SUCCESS;
2685}
2686
2687/* ********************************************************* *
2688 * Functions defined below concern the OS part of the driver *
2689 * ********************************************************* */
2690
2691/**
2692 * s2io_open - open entry point of the driver
2693 * @dev : pointer to the device structure.
2694 * Description:
2695 * This function is the open entry point of the driver. It mainly calls a
2696 * function to allocate Rx buffers and inserts them into the buffer
2697 * descriptors and then enables the Rx part of the NIC.
2698 * Return value:
2699 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2700 * file on failure.
2701 */
2702
2703int s2io_open(struct net_device *dev)
2704{
2705 nic_t *sp = dev->priv;
2706 int err = 0;
2707
2708 /*
2709 * Make sure you have link off by default every time
2710 * Nic is initialized
2711 */
2712 netif_carrier_off(dev);
2713 sp->last_link_state = 0; /* Unknown link state */
2714
2715 /* Initialize H/W and enable interrupts */
2716 if (s2io_card_up(sp)) {
2717 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2718 dev->name);
2719 err = -ENODEV;
2720 goto hw_init_failed;
2721 }
2722
2723 /* After proper initialization of H/W, register ISR */
2724 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2725 sp->name, dev);
2726 if (err) {
2727 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2728 dev->name);
2729 goto isr_registration_failed;
2730 }
2731
2732 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2733 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2734 err = -ENODEV;
2735 goto setting_mac_address_failed;
2736 }
2737
2738 netif_start_queue(dev);
2739 return 0;
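 /*
  * Error unwinding: each label below undoes only the steps that
  * completed before the failure; a failed MAC address setup releases
  * the IRQ, a failed IRQ registration resets the card, and a failed
  * H/W init simply returns the error.
  */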
2740
2741setting_mac_address_failed:
2742 free_irq(sp->pdev->irq, dev);
2743isr_registration_failed:
2744 s2io_reset(sp);
2745hw_init_failed:
2746 return err;
2747}
2748
2749/**
2750 * s2io_close -close entry point of the driver
2751 * @dev : device pointer.
2752 * Description:
2753 * This is the stop entry point of the driver. It needs to undo exactly
2754 * whatever was done by the open entry point, thus it's usually referred to
2755 * as the close function. Among other things this function mainly stops the
2756 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2757 * Return value:
2758 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2759 * file on failure.
2760 */
2761
2762int s2io_close(struct net_device *dev)
2763{
2764 nic_t *sp = dev->priv;
2765 flush_scheduled_work();
2766 netif_stop_queue(dev);
2767 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2768 s2io_card_down(sp);
2769
2770 free_irq(sp->pdev->irq, dev);
2771 sp->device_close_flag = TRUE; /* Device is shut down. */
2772 return 0;
2773}
2774
2775/**
2776 * s2io_xmit - Tx entry point of the driver
2777 * @skb : the socket buffer containing the Tx data.
2778 * @dev : device pointer.
2779 * Description :
2780 * This function is the Tx entry point of the driver. S2IO NIC supports
2781 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2782 * NOTE: when the device can't queue the pkt, just the trans_start variable
2783 * will not be updated.
2784 * Return value:
2785 * 0 on success & 1 on failure.
2786 */
2787
2788int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2789{
2790 nic_t *sp = dev->priv;
2791 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2792 register u64 val64;
2793 TxD_t *txdp;
2794 TxFIFO_element_t __iomem *tx_fifo;
2795 unsigned long flags;
2796#ifdef NETIF_F_TSO
2797 int mss;
2798#endif
2799 mac_info_t *mac_control;
2800 struct config_param *config;
2801
2802 mac_control = &sp->mac_control;
2803 config = &sp->config;
2804
2805 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2806 spin_lock_irqsave(&sp->tx_lock, flags);
2807 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2808 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2809 dev->name);
2810 spin_unlock_irqrestore(&sp->tx_lock, flags);
2811 dev_kfree_skb(skb);
2812 return 0;
2813 }
2814
2815 queue = 0;
2816
2817 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2818 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2819 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2820 list_virt_addr;
2821
2822 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2823 /* Avoid "put" pointer going beyond "get" pointer */
2824 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2825 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2826 netif_stop_queue(dev);
2827 dev_kfree_skb(skb);
2828 spin_unlock_irqrestore(&sp->tx_lock, flags);
2829 return 0;
2830 }
2831#ifdef NETIF_F_TSO
2832 mss = skb_shinfo(skb)->tso_size;
2833 if (mss) {
2834 txdp->Control_1 |= TXD_TCP_LSO_EN;
2835 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2836 }
2837#endif
2838
2839 frg_cnt = skb_shinfo(skb)->nr_frags;
2840 frg_len = skb->len - skb->data_len;
2841
2842 txdp->Buffer_Pointer = pci_map_single
2843 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2844 txdp->Host_Control = (unsigned long) skb;
2845 if (skb->ip_summed == CHECKSUM_HW) {
2846 txdp->Control_2 |=
2847 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2848 TXD_TX_CKO_UDP_EN);
2849 }
2850
2851 txdp->Control_2 |= config->tx_intr_type;
2852
2853 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2854 TXD_GATHER_CODE_FIRST);
2855 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2856
2857 /* For fragmented SKB. */
2858 for (i = 0; i < frg_cnt; i++) {
2859 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2860 txdp++;
2861 txdp->Buffer_Pointer = (u64) pci_map_page
2862 (sp->pdev, frag->page, frag->page_offset,
2863 frag->size, PCI_DMA_TODEVICE);
2864 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2865 }
2866 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2867
2868 tx_fifo = mac_control->tx_FIFO_start[queue];
2869 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2870 writeq(val64, &tx_fifo->TxDL_Pointer);
2871
2872 wmb();
2873
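 /*
  * The write barrier above orders the TxDL_Pointer update ahead of the
  * List_Control write below, which is what actually hands the
  * descriptor list to the NIC.
  */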
2874 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2875 TX_FIFO_LAST_LIST);
2876
2877#ifdef NETIF_F_TSO
2878 if (mss)
2879 val64 |= TX_FIFO_SPECIAL_FUNC;
2880#endif
2881 writeq(val64, &tx_fifo->List_Control);
2882
2883 put_off++;
2884 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2885 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2886
2887 /* Avoid "put" pointer going beyond "get" pointer */
2888 if (((put_off + 1) % queue_len) == get_off) {
2889 DBG_PRINT(TX_DBG,
2890 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2891 put_off, get_off);
2892 netif_stop_queue(dev);
2893 }
2894
2895 dev->trans_start = jiffies;
2896 spin_unlock_irqrestore(&sp->tx_lock, flags);
2897
2898 return 0;
2899}
2900
2901/**
2902 * s2io_isr - ISR handler of the device .
2903 * @irq: the irq of the device.
2904 * @dev_id: a void pointer to the dev structure of the NIC.
2905 * @pt_regs: pointer to the registers pushed on the stack.
2906 * Description: This function is the ISR handler of the device. It
2907 * identifies the reason for the interrupt and calls the relevant
2908 * service routines. As a contingency measure, this ISR allocates the
2909 * recv buffers, if their numbers are below the panic value which is
2910 * presently set to 25% of the original number of rcv buffers allocated.
2911 * Return value:
2912 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2913 * IRQ_NONE: will be returned if interrupt is not from our device
2914 */
2915static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2916{
2917 struct net_device *dev = (struct net_device *) dev_id;
2918 nic_t *sp = dev->priv;
2919 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2920 int i;
2921 u64 reason = 0, val64;
2922 mac_info_t *mac_control;
2923 struct config_param *config;
2924
2925 atomic_inc(&sp->isr_cnt);
2926 mac_control = &sp->mac_control;
2927 config = &sp->config;
2928
2929 /*
2930 * Identify the cause for interrupt and call the appropriate
2931 * interrupt handler. Causes for the interrupt could be;
2932 * 1. Rx of packet.
2933 * 2. Tx complete.
2934 * 3. Link down.
2935 * 4. Error in any functional blocks of the NIC.
2936 */
2937 reason = readq(&bar0->general_int_status);
2938
2939 if (!reason) {
2940 /* The interrupt was not raised by Xena. */
2941 atomic_dec(&sp->isr_cnt);
2942 return IRQ_NONE;
2943 }
2944
2945 if (reason & (GEN_ERROR_INTR))
2946 alarm_intr_handler(sp);
2947
2948#ifdef CONFIG_S2IO_NAPI
2949 if (reason & GEN_INTR_RXTRAFFIC) {
2950 if (netif_rx_schedule_prep(dev)) {
2951 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2952 DISABLE_INTRS);
2953 __netif_rx_schedule(dev);
2954 }
2955 }
2956#else
2957 /* If Intr is because of Rx Traffic */
2958 if (reason & GEN_INTR_RXTRAFFIC) {
2959 /*
2960 * rx_traffic_int reg is an R1 register, writing all 1's
2961 * will ensure that the actual interrupt causing bit gets
2962 * cleared and hence a read can be avoided.
2963 */
2964 val64 = 0xFFFFFFFFFFFFFFFFULL;
2965 writeq(val64, &bar0->rx_traffic_int);
2966 for (i = 0; i < config->rx_ring_num; i++) {
2967 rx_intr_handler(&mac_control->rings[i]);
2968 }
2969 }
2970#endif
2971
2972 /* If Intr is because of Tx Traffic */
2973 if (reason & GEN_INTR_TXTRAFFIC) {
2974 /*
2975 * tx_traffic_int reg is an R1 register, writing all 1's
2976 * will ensure that the actual interrupt causing bit gets
2977 * cleared and hence a read can be avoided.
2978 */
2979 val64 = 0xFFFFFFFFFFFFFFFFULL;
2980 writeq(val64, &bar0->tx_traffic_int);
2981
2982 for (i = 0; i < config->tx_fifo_num; i++)
2983 tx_intr_handler(&mac_control->fifos[i]);
2984 }
2985
2986 /*
2987 * If the Rx buffer count is below the panic threshold then
2988 * reallocate the buffers from the interrupt handler itself,
2989 * else schedule a tasklet to reallocate the buffers.
2990 */
2991#ifndef CONFIG_S2IO_NAPI
2992 for (i = 0; i < config->rx_ring_num; i++) {
2993 int ret;
2994 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2995 int level = rx_buffer_level(sp, rxb_size, i);
2996
2997 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2998 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2999 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3000 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3001 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3002 dev->name);
3003 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3004 clear_bit(0, (&sp->tasklet_status));
3005 atomic_dec(&sp->isr_cnt);
3006 return IRQ_HANDLED;
3007 }
3008 clear_bit(0, (&sp->tasklet_status));
3009 } else if (level == LOW) {
3010 tasklet_schedule(&sp->task);
3011 }
3012 }
3013#endif
3014
3015 atomic_dec(&sp->isr_cnt);
3016 return IRQ_HANDLED;
3017}
3018
3019/**
3020 * s2io_updt_stats - Triggers a one-shot H/W statistics update and waits briefly for it to complete.
3021 */
3022static void s2io_updt_stats(nic_t *sp)
3023{
3024 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3025 u64 val64;
3026 int cnt = 0;
3027
3028 if (atomic_read(&sp->card_state) == CARD_UP) {
3029 /* Apprx 30us on a 133 MHz bus */
3030 val64 = SET_UPDT_CLICKS(10) |
3031 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3032 writeq(val64, &bar0->stat_cfg);
3033 do {
3034 udelay(100);
3035 val64 = readq(&bar0->stat_cfg);
3036 if (!(val64 & BIT(0)))
3037 break;
3038 cnt++;
3039 if (cnt == 5)
3040 break; /* Updt failed */
3041 } while(1);
3042 }
3043}
3044
3045/**
3046 * s2io_get_stats - Updates the device statistics structure.
3047 * @dev : pointer to the device structure.
3048 * Description:
3049 * This function updates the device statistics structure in the s2io_nic
3050 * structure and returns a pointer to the same.
3051 * Return value:
3052 * pointer to the updated net_device_stats structure.
3053 */
3054
3055struct net_device_stats *s2io_get_stats(struct net_device *dev)
3056{
3057 nic_t *sp = dev->priv;
3058 mac_info_t *mac_control;
3059 struct config_param *config;
3060
3061
3062 mac_control = &sp->mac_control;
3063 config = &sp->config;
3064
3065 /* Configure Stats for immediate updt */
3066 s2io_updt_stats(sp);
3067
3068 sp->stats.tx_packets =
3069 le32_to_cpu(mac_control->stats_info->tmac_frms);
3070 sp->stats.tx_errors =
3071 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3072 sp->stats.rx_errors =
3073 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3074 sp->stats.multicast =
3075 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3076 sp->stats.rx_length_errors =
3077 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3078
3079 return (&sp->stats);
3080}
3081
3082/**
3083 * s2io_set_multicast - entry point for multicast address enable/disable.
3084 * @dev : pointer to the device structure
3085 * Description:
3086 * This function is a driver entry point which gets called by the kernel
3087 * whenever multicast addresses must be enabled/disabled. This also gets
3088 * called to set/reset promiscuous mode. Depending on the device flag, we
3089 * determine, if multicast address must be enabled or if promiscuous mode
3090 * is to be disabled etc.
3091 * Return value:
3092 * void.
3093 */
3094
3095static void s2io_set_multicast(struct net_device *dev)
3096{
3097 int i, j, prev_cnt;
3098 struct dev_mc_list *mclist;
3099 nic_t *sp = dev->priv;
3100 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3101 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3102 0xfeffffffffffULL;
3103 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3104 void __iomem *add;
3105
3106 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3107 /* Enable all Multicast addresses */
3108 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3109 &bar0->rmac_addr_data0_mem);
3110 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3111 &bar0->rmac_addr_data1_mem);
3112 val64 = RMAC_ADDR_CMD_MEM_WE |
3113 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3114 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3115 writeq(val64, &bar0->rmac_addr_cmd_mem);
3116 /* Wait till command completes */
3117 wait_for_cmd_complete(sp);
3118
3119 sp->m_cast_flg = 1;
3120 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3121 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3122 /* Disable all Multicast addresses */
3123 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3124 &bar0->rmac_addr_data0_mem);
3125 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3126 &bar0->rmac_addr_data1_mem);
3127 val64 = RMAC_ADDR_CMD_MEM_WE |
3128 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3129 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3130 writeq(val64, &bar0->rmac_addr_cmd_mem);
3131 /* Wait till command completes */
3132 wait_for_cmd_complete(sp);
3133
3134 sp->m_cast_flg = 0;
3135 sp->all_multi_pos = 0;
3136 }
3137
3138 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3139 /* Put the NIC into promiscuous mode */
3140 add = &bar0->mac_cfg;
3141 val64 = readq(&bar0->mac_cfg);
3142 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3143
3144 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3145 writel((u32) val64, add);
3146 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3147 writel((u32) (val64 >> 32), (add + 4));
3148
3149 val64 = readq(&bar0->mac_cfg);
3150 sp->promisc_flg = 1;
3151 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3152 dev->name);
3153 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3154 /* Remove the NIC from promiscuous mode */
3155 add = &bar0->mac_cfg;
3156 val64 = readq(&bar0->mac_cfg);
3157 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3158
3159 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3160 writel((u32) val64, add);
3161 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3162 writel((u32) (val64 >> 32), (add + 4));
3163
3164 val64 = readq(&bar0->mac_cfg);
3165 sp->promisc_flg = 0;
3166 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3167 dev->name);
3168 }
3169
3170 /* Update individual M_CAST address list */
3171 if ((!sp->m_cast_flg) && dev->mc_count) {
3172 if (dev->mc_count >
3173 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3174 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3175 dev->name);
3176 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3177 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3178 return;
3179 }
3180
3181 prev_cnt = sp->mc_addr_count;
3182 sp->mc_addr_count = dev->mc_count;
3183
3184 /* Clear out the previous list of Mc in the H/W. */
3185 for (i = 0; i < prev_cnt; i++) {
3186 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3187 &bar0->rmac_addr_data0_mem);
3188 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3189 &bar0->rmac_addr_data1_mem);
3190 val64 = RMAC_ADDR_CMD_MEM_WE |
3191 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3192 RMAC_ADDR_CMD_MEM_OFFSET
3193 (MAC_MC_ADDR_START_OFFSET + i);
3194 writeq(val64, &bar0->rmac_addr_cmd_mem);
3195
3196 /* Wait for command completes */
3197 if (wait_for_cmd_complete(sp)) {
3198 DBG_PRINT(ERR_DBG, "%s: Adding ",
3199 dev->name);
3200 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3201 return;
3202 }
3203 }
3204
3205 /* Create the new Rx filter list and update the same in H/W. */
3206 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3207 i++, mclist = mclist->next) {
3208 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3209 ETH_ALEN);
3210 for (j = 0; j < ETH_ALEN; j++) {
3211 mac_addr |= mclist->dmi_addr[j];
3212 mac_addr <<= 8;
3213 }
3214 mac_addr >>= 8;
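 /*
  * The loop above packs the 6-byte multicast address into a u64 with
  * the first octet in the most significant position; the final shift
  * undoes the extra shift done on the last iteration.
  */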
3215 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3216 &bar0->rmac_addr_data0_mem);
3217 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3218 &bar0->rmac_addr_data1_mem);
3219 val64 = RMAC_ADDR_CMD_MEM_WE |
3220 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3221 RMAC_ADDR_CMD_MEM_OFFSET
3222 (i + MAC_MC_ADDR_START_OFFSET);
3223 writeq(val64, &bar0->rmac_addr_cmd_mem);
3224
3225 /* Wait for command completes */
3226 if (wait_for_cmd_complete(sp)) {
3227 DBG_PRINT(ERR_DBG, "%s: Adding ",
3228 dev->name);
3229 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3230 return;
3231 }
3232 }
3233 }
3234}
3235
3236/**
3237 * s2io_set_mac_addr - Programs the Xframe mac address
3238 * @dev : pointer to the device structure.
3239 * @addr: a uchar pointer to the new mac address which is to be set.
3240 * Description : This procedure will program the Xframe to receive
3241 * frames with the new Mac Address
3242 * Return value: SUCCESS on success and an appropriate (-)ve integer
3243 * as defined in errno.h file on failure.
3244 */
3245
3246int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3247{
3248 nic_t *sp = dev->priv;
3249 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3250 register u64 val64, mac_addr = 0;
3251 int i;
3252
3253 /*
3254 * Set the new MAC address as the new unicast filter and reflect this
3255 * change on the device address registered with the OS. It will be
3256 * at offset 0.
3257 */
3258 for (i = 0; i < ETH_ALEN; i++) {
3259 mac_addr <<= 8;
3260 mac_addr |= addr[i];
3261 }
3262
3263 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3264 &bar0->rmac_addr_data0_mem);
3265
3266 val64 =
3267 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3268 RMAC_ADDR_CMD_MEM_OFFSET(0);
3269 writeq(val64, &bar0->rmac_addr_cmd_mem);
3270 /* Wait till command completes */
3271 if (wait_for_cmd_complete(sp)) {
3272 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3273 return FAILURE;
3274 }
3275
3276 return SUCCESS;
3277}
3278
3279/**
3280 * s2io_ethtool_sset - Sets different link parameters.
3281 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3282 * @info: pointer to the structure with parameters given by ethtool to set
3283 * link information.
3284 * Description:
3285 * The function sets different link parameters provided by the user onto
3286 * the NIC.
3287 * Return value:
3288 * 0 on success.
3289*/
3290
3291static int s2io_ethtool_sset(struct net_device *dev,
3292 struct ethtool_cmd *info)
3293{
3294 nic_t *sp = dev->priv;
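 /*
  * The Xframe link is fixed at 10 Gbps full duplex with autoneg off,
  * so any other request is rejected; an acceptable request simply
  * bounces the interface so the settings are re-applied.
  */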
3295 if ((info->autoneg == AUTONEG_ENABLE) ||
3296 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3297 return -EINVAL;
3298 else {
3299 s2io_close(sp->dev);
3300 s2io_open(sp->dev);
3301 }
3302
3303 return 0;
3304}
3305
3306/**
3307 * s2io_ethtool_gset - Return link specific information.
3308 * @sp : private member of the device structure, pointer to the
3309 * s2io_nic structure.
3310 * @info : pointer to the structure with parameters given by ethtool
3311 * to return link information.
3312 * Description:
3313 * Returns link specific information like speed, duplex etc.. to ethtool.
3314 * Return value :
3315 * return 0 on success.
3316 */
3317
3318static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3319{
3320 nic_t *sp = dev->priv;
3321 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3322 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3323 info->port = PORT_FIBRE;
3324 /* info->transceiver?? TODO */
3325
3326 if (netif_carrier_ok(sp->dev)) {
3327 info->speed = 10000;
3328 info->duplex = DUPLEX_FULL;
3329 } else {
3330 info->speed = -1;
3331 info->duplex = -1;
3332 }
3333
3334 info->autoneg = AUTONEG_DISABLE;
3335 return 0;
3336}
3337
3338/**
3339 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3340 * @sp : private member of the device structure, which is a pointer to the
3341 * s2io_nic structure.
3342 * @info : pointer to the structure with parameters given by ethtool to
3343 * return driver information.
3344 * Description:
3345 * Returns driver specific information like name, version etc. to ethtool.
3346 * Return value:
3347 * void
3348 */
3349
3350static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3351 struct ethtool_drvinfo *info)
3352{
3353 nic_t *sp = dev->priv;
3354
3355 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3356 strncpy(info->version, s2io_driver_version,
3357 sizeof(s2io_driver_version));
3358 strncpy(info->fw_version, "", 32);
3359 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3360 info->regdump_len = XENA_REG_SPACE;
3361 info->eedump_len = XENA_EEPROM_SPACE;
3362 info->testinfo_len = S2IO_TEST_LEN;
3363 info->n_stats = S2IO_STAT_LEN;
3364}
3365
3366/**
3367 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3368 * @sp: private member of the device structure, which is a pointer to the
3369 * s2io_nic structure.
3370 * @regs : pointer to the structure with parameters given by ethtool for
3371 * dumping the registers.
3372 * @reg_space: The input argument into which all the registers are dumped.
3373 * Description:
3374 * Dumps the entire register space of xFrame NIC into the user given
3375 * buffer area.
3376 * Return value :
3377 * void.
3378*/
3379
3380static void s2io_ethtool_gregs(struct net_device *dev,
3381 struct ethtool_regs *regs, void *space)
3382{
3383 int i;
3384 u64 reg;
3385 u8 *reg_space = (u8 *) space;
3386 nic_t *sp = dev->priv;
3387
3388 regs->len = XENA_REG_SPACE;
3389 regs->version = sp->pdev->subsystem_device;
3390
3391 for (i = 0; i < regs->len; i += 8) {
3392 reg = readq(sp->bar0 + i);
3393 memcpy((reg_space + i), &reg, 8);
3394 }
3395}
3396
3397/**
3398 * s2io_phy_id - timer function that alternates adapter LED.
20346722 3399 * @data : address of the private member of the device structure, which
1da177e4 3400 * is a pointer to the s2io_nic structure, provided as an unsigned long.
20346722
K
3401 * Description: This is actually the timer function that alternates the
3402 * adapter LED bit of the adapter control register to set/reset every time on
3403 * invocation. The timer is set for 1/2 a second, hence the NIC's LED blinks
1da177e4
LT
3404 * once every second.
3405*/
3406static void s2io_phy_id(unsigned long data)
3407{
3408 nic_t *sp = (nic_t *) data;
3409 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3410 u64 val64 = 0;
3411 u16 subid;
3412
3413 subid = sp->pdev->subsystem_device;
3414 if ((subid & 0xFF) >= 0x07) {
3415 val64 = readq(&bar0->gpio_control);
3416 val64 ^= GPIO_CTRL_GPIO_0;
3417 writeq(val64, &bar0->gpio_control);
3418 } else {
3419 val64 = readq(&bar0->adapter_control);
3420 val64 ^= ADAPTER_LED_ON;
3421 writeq(val64, &bar0->adapter_control);
3422 }
3423
3424 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3425}
3426
3427/**
3428 * s2io_ethtool_idnic - To physically identify the nic on the system.
3429 * @sp : private member of the device structure, which is a pointer to the
3430 * s2io_nic structure.
20346722 3431 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
3432 * ethtool.
3433 * Description: Used to physically identify the NIC on the system.
20346722 3434 * The Link LED will blink for a time specified by the user for
1da177e4 3435 * identification.
20346722 3436 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
3437 * identification is possible only if its link is up.
3438 * Return value:
3439 * int , returns 0 on success
3440 */
3441
3442static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3443{
3444 u64 val64 = 0, last_gpio_ctrl_val;
3445 nic_t *sp = dev->priv;
3446 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3447 u16 subid;
3448
3449 subid = sp->pdev->subsystem_device;
3450 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3451 if ((subid & 0xFF) < 0x07) {
3452 val64 = readq(&bar0->adapter_control);
3453 if (!(val64 & ADAPTER_CNTL_EN)) {
3454 printk(KERN_ERR
3455 "Adapter Link down, cannot blink LED\n");
3456 return -EFAULT;
3457 }
3458 }
3459 if (sp->id_timer.function == NULL) {
3460 init_timer(&sp->id_timer);
3461 sp->id_timer.function = s2io_phy_id;
3462 sp->id_timer.data = (unsigned long) sp;
3463 }
3464 mod_timer(&sp->id_timer, jiffies);
3465 if (data)
20346722 3466 msleep_interruptible(data * HZ);
1da177e4 3467 else
20346722 3468 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3469 del_timer_sync(&sp->id_timer);
3470
3471 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3472 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3473 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3474 }
3475
3476 return 0;
3477}
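/*
 * Illustrative note (an assumption about usage, not taken from this file):
 * the routine above is exported through .phys_id in netdev_ethtool_ops
 * further below, so a command such as "ethtool -p <ifname> <seconds>"
 * reaches it with @data holding the number of seconds the user asked the
 * LED to blink, and data == 0 selecting the driver's default duration.
 */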
3478
3479/**
3480 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
3481 * @sp : private member of the device structure, which is a pointer to the
3482 * s2io_nic structure.
1da177e4
LT
3483 * @ep : pointer to the structure with pause parameters given by ethtool.
3484 * Description:
3485 * Returns the Pause frame generation and reception capability of the NIC.
3486 * Return value:
3487 * void
3488 */
3489static void s2io_ethtool_getpause_data(struct net_device *dev,
3490 struct ethtool_pauseparam *ep)
3491{
3492 u64 val64;
3493 nic_t *sp = dev->priv;
3494 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3495
3496 val64 = readq(&bar0->rmac_pause_cfg);
3497 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3498 ep->tx_pause = TRUE;
3499 if (val64 & RMAC_PAUSE_RX_ENABLE)
3500 ep->rx_pause = TRUE;
3501 ep->autoneg = FALSE;
3502}
3503
3504/**
3505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3506 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3507 * s2io_nic structure.
3508 * @ep : pointer to the structure with pause parameters given by ethtool.
3509 * Description:
3510 * It can be used to set or reset Pause frame generation or reception
3511 * support of the NIC.
3512 * Return value:
3513 * int, returns 0 on Success
3514 */
3515
3516static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3517 struct ethtool_pauseparam *ep)
1da177e4
LT
3518{
3519 u64 val64;
3520 nic_t *sp = dev->priv;
3521 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3522
3523 val64 = readq(&bar0->rmac_pause_cfg);
3524 if (ep->tx_pause)
3525 val64 |= RMAC_PAUSE_GEN_ENABLE;
3526 else
3527 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3528 if (ep->rx_pause)
3529 val64 |= RMAC_PAUSE_RX_ENABLE;
3530 else
3531 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3532 writeq(val64, &bar0->rmac_pause_cfg);
3533 return 0;
3534}
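/*
 * Illustrative note (an assumption about usage, not taken from this file):
 * the two handlers above back .get_pauseparam/.set_pauseparam in
 * netdev_ethtool_ops, so e.g. "ethtool -A <ifname> rx on tx off" arrives
 * here with ep->rx_pause set and ep->tx_pause cleared, toggling the
 * RMAC_PAUSE_RX_ENABLE and RMAC_PAUSE_GEN_ENABLE bits accordingly.
 */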
3535
3536/**
3537 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 3538 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3539 * s2io_nic structure.
3540 * @off : offset from which the data is to be read
3541 * @data : It is an output parameter where the data read at the given
20346722 3542 * offset is stored.
1da177e4 3543 * Description:
20346722 3544 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
3545 * read data.
3546 * NOTE: Will allow reading only the part of the EEPROM visible through the
3547 * I2C bus.
3548 * Return value:
3549 * -1 on failure and 0 on success.
3550 */
3551
3552#define S2IO_DEV_ID 5
3553static int read_eeprom(nic_t * sp, int off, u32 * data)
3554{
3555 int ret = -1;
3556 u32 exit_cnt = 0;
3557 u64 val64;
3558 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3559
3560 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3561 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3562 I2C_CONTROL_CNTL_START;
3563 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3564
3565 while (exit_cnt < 5) {
3566 val64 = readq(&bar0->i2c_control);
3567 if (I2C_CONTROL_CNTL_END(val64)) {
3568 *data = I2C_CONTROL_GET_DATA(val64);
3569 ret = 0;
3570 break;
3571 }
3572 msleep(50);
3573 exit_cnt++;
3574 }
3575
3576 return ret;
3577}
3578
3579/**
3580 * write_eeprom - actually writes the relevant part of the data value.
3581 * @sp : private member of the device structure, which is a pointer to the
3582 * s2io_nic structure.
3583 * @off : offset at which the data must be written
3584 * @data : The data that is to be written
20346722 3585 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
3586 * the Eeprom. (max of 3)
3587 * Description:
3588 * Actually writes the relevant part of the data value into the Eeprom
3589 * through the I2C bus.
3590 * Return value:
3591 * 0 on success, -1 on failure.
3592 */
3593
3594static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3595{
3596 int exit_cnt = 0, ret = -1;
3597 u64 val64;
3598 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3599
3600 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3601 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3602 I2C_CONTROL_CNTL_START;
3603 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3604
3605 while (exit_cnt < 5) {
3606 val64 = readq(&bar0->i2c_control);
3607 if (I2C_CONTROL_CNTL_END(val64)) {
3608 if (!(val64 & I2C_CONTROL_NACK))
3609 ret = 0;
3610 break;
3611 }
3612 msleep(50);
3613 exit_cnt++;
3614 }
3615
3616 return ret;
3617}
3618
3619/**
3620 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3621 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 3622 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
3623 * containing all relevant information.
3624 * @data_buf : user defined value to be written into Eeprom.
3625 * Description: Reads the values stored in the Eeprom at given offset
3626 * for a given length. Stores these values in the input argument data
3627 * buffer 'data_buf' and returns these to the caller (ethtool.)
3628 * Return value:
3629 * int 0 on success
3630 */
3631
3632static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 3633 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4
LT
3634{
3635 u32 data, i, valid;
3636 nic_t *sp = dev->priv;
3637
3638 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3639
3640 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3641 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3642
3643 for (i = 0; i < eeprom->len; i += 4) {
3644 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3645 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3646 return -EFAULT;
3647 }
3648 valid = INV(data);
3649 memcpy((data_buf + i), &valid, 4);
3650 }
3651 return 0;
3652}
3653
3654/**
3655 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3656 * @sp : private member of the device structure, which is a pointer to the
3657 * s2io_nic structure.
20346722 3658 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
3659 * containing all relevant information.
3660 * @data_buf : user defined value to be written into Eeprom.
3661 * Description:
3662 * Tries to write the user provided value in the Eeprom, at the offset
3663 * given by the user.
3664 * Return value:
3665 * 0 on success, -EFAULT on failure.
3666 */
3667
3668static int s2io_ethtool_seeprom(struct net_device *dev,
3669 struct ethtool_eeprom *eeprom,
3670 u8 * data_buf)
3671{
3672 int len = eeprom->len, cnt = 0;
3673 u32 valid = 0, data;
3674 nic_t *sp = dev->priv;
3675
3676 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3677 DBG_PRINT(ERR_DBG,
3678 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3679 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3680 eeprom->magic);
3681 return -EFAULT;
3682 }
3683
3684 while (len) {
3685 data = (u32) data_buf[cnt] & 0x000000FF;
3686 if (data) {
3687 valid = (u32) (data << 24);
3688 } else
3689 valid = data;
3690
3691 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3692 DBG_PRINT(ERR_DBG,
3693 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3694 DBG_PRINT(ERR_DBG,
3695 "write into the specified offset\n");
3696 return -EFAULT;
3697 }
3698 cnt++;
3699 len--;
3700 }
3701
3702 return 0;
3703}
3704
3705/**
20346722
K
3706 * s2io_register_test - reads and writes into all clock domains.
3707 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3708 * s2io_nic structure.
3709 * @data : variable that returns the result of each of the tests conducted
3710 * by the driver.
3711 * Description:
3712 * Read and write into all clock domains. The NIC has 3 clock domains,
3713 * and verify that registers in all the three regions are accessible.
3714 * Return value:
3715 * 0 on success.
3716 */
3717
3718static int s2io_register_test(nic_t * sp, uint64_t * data)
3719{
3720 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3721 u64 val64 = 0;
3722 int fail = 0;
3723
20346722
K
3724 val64 = readq(&bar0->pif_rd_swapper_fb);
3725 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
3726 fail = 1;
3727 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3728 }
3729
3730 val64 = readq(&bar0->rmac_pause_cfg);
3731 if (val64 != 0xc000ffff00000000ULL) {
3732 fail = 1;
3733 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3734 }
3735
3736 val64 = readq(&bar0->rx_queue_cfg);
3737 if (val64 != 0x0808080808080808ULL) {
3738 fail = 1;
3739 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3740 }
3741
3742 val64 = readq(&bar0->xgxs_efifo_cfg);
3743 if (val64 != 0x000000001923141EULL) {
3744 fail = 1;
3745 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3746 }
3747
3748 val64 = 0x5A5A5A5A5A5A5A5AULL;
3749 writeq(val64, &bar0->xmsi_data);
3750 val64 = readq(&bar0->xmsi_data);
3751 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3752 fail = 1;
3753 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3754 }
3755
3756 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3757 writeq(val64, &bar0->xmsi_data);
3758 val64 = readq(&bar0->xmsi_data);
3759 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3760 fail = 1;
3761 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3762 }
3763
3764 *data = fail;
3765 return 0;
3766}
3767
3768/**
20346722 3769 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
1da177e4
LT
3770 * @sp : private member of the device structure, which is a pointer to the
3771 * s2io_nic structure.
3772 * @data:variable that returns the result of each of the test conducted by
3773 * the driver.
3774 * Description:
20346722 3775 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
3776 * register.
3777 * Return value:
3778 * 0 on success.
3779 */
3780
3781static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3782{
3783 int fail = 0;
3784 u32 ret_data;
3785
3786 /* Test Write Error at offset 0 */
3787 if (!write_eeprom(sp, 0, 0, 3))
3788 fail = 1;
3789
3790 /* Test Write at offset 4f0 */
3791 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3792 fail = 1;
3793 if (read_eeprom(sp, 0x4F0, &ret_data))
3794 fail = 1;
3795
3796 if (ret_data != 0x01234567)
3797 fail = 1;
3798
3799 /* Reset the EEPROM data to FFFF */
3800 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3801
3802 /* Test Write Request Error at offset 0x7c */
3803 if (!write_eeprom(sp, 0x07C, 0, 3))
3804 fail = 1;
3805
3806 /* Test Write Request at offset 0x7fc */
3807 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3808 fail = 1;
3809 if (read_eeprom(sp, 0x7FC, &ret_data))
3810 fail = 1;
3811
3812 if (ret_data != 0x01234567)
3813 fail = 1;
3814
3815 /* Reset the EEPROM data to FFFF */
3816 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3817
3818 /* Test Write Error at offset 0x80 */
3819 if (!write_eeprom(sp, 0x080, 0, 3))
3820 fail = 1;
3821
3822 /* Test Write Error at offset 0xfc */
3823 if (!write_eeprom(sp, 0x0FC, 0, 3))
3824 fail = 1;
3825
3826 /* Test Write Error at offset 0x100 */
3827 if (!write_eeprom(sp, 0x100, 0, 3))
3828 fail = 1;
3829
3830 /* Test Write Error at offset 4ec */
3831 if (!write_eeprom(sp, 0x4EC, 0, 3))
3832 fail = 1;
3833
3834 *data = fail;
3835 return 0;
3836}
3837
3838/**
3839 * s2io_bist_test - invokes the MemBist test of the card.
20346722 3840 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3841 * s2io_nic structure.
20346722 3842 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
3843 * the driver.
3844 * Description:
3845 * This invokes the MemBist test of the card. We give around
3846 * 2 seconds for the test to complete. If it's still not complete
20346722 3847 * within this period, we consider that the test failed.
1da177e4
LT
3848 * Return value:
3849 * 0 on success and -1 on failure.
3850 */
3851
3852static int s2io_bist_test(nic_t * sp, uint64_t * data)
3853{
3854 u8 bist = 0;
3855 int cnt = 0, ret = -1;
3856
3857 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3858 bist |= PCI_BIST_START;
3859 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3860
3861 while (cnt < 20) {
3862 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3863 if (!(bist & PCI_BIST_START)) {
3864 *data = (bist & PCI_BIST_CODE_MASK);
3865 ret = 0;
3866 break;
3867 }
3868 msleep(100);
3869 cnt++;
3870 }
3871
3872 return ret;
3873}
3874
3875/**
20346722
K
3876 * s2io_link_test - verifies the link state of the NIC
3877 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3878 * s2io_nic structure.
3879 * @data: variable that returns the result of each of the test conducted by
3880 * the driver.
3881 * Description:
20346722 3882 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
3883 * argument 'data' appropriately.
3884 * Return value:
3885 * 0 on success.
3886 */
3887
3888static int s2io_link_test(nic_t * sp, uint64_t * data)
3889{
3890 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3891 u64 val64;
3892
3893 val64 = readq(&bar0->adapter_status);
3894 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3895 *data = 1;
3896
3897 return 0;
3898}
3899
3900/**
20346722
K
3901 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3902 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3903 * s2io_nic structure.
20346722 3904 * @data : variable that returns the result of each of the tests
1da177e4
LT
3905 * conducted by the driver.
3906 * Description:
20346722 3907 * This is one of the offline tests that checks the read and write
1da177e4
LT
3908 * access to the RldRam chip on the NIC.
3909 * Return value:
3910 * 0 on success.
3911 */
3912
3913static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3914{
3915 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3916 u64 val64;
3917 int cnt, iteration = 0, test_pass = 0;
3918
3919 val64 = readq(&bar0->adapter_control);
3920 val64 &= ~ADAPTER_ECC_EN;
3921 writeq(val64, &bar0->adapter_control);
3922
3923 val64 = readq(&bar0->mc_rldram_test_ctrl);
3924 val64 |= MC_RLDRAM_TEST_MODE;
3925 writeq(val64, &bar0->mc_rldram_test_ctrl);
3926
3927 val64 = readq(&bar0->mc_rldram_mrs);
3928 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3929 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3930
3931 val64 |= MC_RLDRAM_MRS_ENABLE;
3932 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3933
3934 while (iteration < 2) {
3935 val64 = 0x55555555aaaa0000ULL;
3936 if (iteration == 1) {
3937 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3938 }
3939 writeq(val64, &bar0->mc_rldram_test_d0);
3940
3941 val64 = 0xaaaa5a5555550000ULL;
3942 if (iteration == 1) {
3943 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3944 }
3945 writeq(val64, &bar0->mc_rldram_test_d1);
3946
3947 val64 = 0x55aaaaaaaa5a0000ULL;
3948 if (iteration == 1) {
3949 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3950 }
3951 writeq(val64, &bar0->mc_rldram_test_d2);
3952
3953 val64 = (u64) (0x0000003fffff0000ULL);
3954 writeq(val64, &bar0->mc_rldram_test_add);
3955
3956
3957 val64 = MC_RLDRAM_TEST_MODE;
3958 writeq(val64, &bar0->mc_rldram_test_ctrl);
3959
3960 val64 |=
3961 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3962 MC_RLDRAM_TEST_GO;
3963 writeq(val64, &bar0->mc_rldram_test_ctrl);
3964
3965 for (cnt = 0; cnt < 5; cnt++) {
3966 val64 = readq(&bar0->mc_rldram_test_ctrl);
3967 if (val64 & MC_RLDRAM_TEST_DONE)
3968 break;
3969 msleep(200);
3970 }
3971
3972 if (cnt == 5)
3973 break;
3974
3975 val64 = MC_RLDRAM_TEST_MODE;
3976 writeq(val64, &bar0->mc_rldram_test_ctrl);
3977
3978 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3979 writeq(val64, &bar0->mc_rldram_test_ctrl);
3980
3981 for (cnt = 0; cnt < 5; cnt++) {
3982 val64 = readq(&bar0->mc_rldram_test_ctrl);
3983 if (val64 & MC_RLDRAM_TEST_DONE)
3984 break;
3985 msleep(500);
3986 }
3987
3988 if (cnt == 5)
3989 break;
3990
3991 val64 = readq(&bar0->mc_rldram_test_ctrl);
3992 if (val64 & MC_RLDRAM_TEST_PASS)
3993 test_pass = 1;
3994
3995 iteration++;
3996 }
3997
3998 if (!test_pass)
3999 *data = 1;
4000 else
4001 *data = 0;
4002
4003 return 0;
4004}
4005
4006/**
4007 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4008 * @sp : private member of the device structure, which is a pointer to the
4009 * s2io_nic structure.
4010 * @ethtest : pointer to an ethtool command specific structure that will be
4011 * returned to the user.
20346722 4012 * @data : variable that returns the result of each of the tests
1da177e4
LT
4013 * conducted by the driver.
4014 * Description:
4015 * This function conducts 5 tests (4 offline and 1 online) to determine
4016 * the health of the card.
4017 * Return value:
4018 * void
4019 */
4020
4021static void s2io_ethtool_test(struct net_device *dev,
4022 struct ethtool_test *ethtest,
4023 uint64_t * data)
4024{
4025 nic_t *sp = dev->priv;
4026 int orig_state = netif_running(sp->dev);
4027
4028 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4029 /* Offline Tests. */
20346722 4030 if (orig_state)
1da177e4 4031 s2io_close(sp->dev);
1da177e4
LT
4032
4033 if (s2io_register_test(sp, &data[0]))
4034 ethtest->flags |= ETH_TEST_FL_FAILED;
4035
4036 s2io_reset(sp);
1da177e4
LT
4037
4038 if (s2io_rldram_test(sp, &data[3]))
4039 ethtest->flags |= ETH_TEST_FL_FAILED;
4040
4041 s2io_reset(sp);
1da177e4
LT
4042
4043 if (s2io_eeprom_test(sp, &data[1]))
4044 ethtest->flags |= ETH_TEST_FL_FAILED;
4045
4046 if (s2io_bist_test(sp, &data[4]))
4047 ethtest->flags |= ETH_TEST_FL_FAILED;
4048
4049 if (orig_state)
4050 s2io_open(sp->dev);
4051
4052 data[2] = 0;
4053 } else {
4054 /* Online Tests. */
4055 if (!orig_state) {
4056 DBG_PRINT(ERR_DBG,
4057 "%s: is not up, cannot run test\n",
4058 dev->name);
4059 data[0] = -1;
4060 data[1] = -1;
4061 data[2] = -1;
4062 data[3] = -1;
4063 data[4] = -1;
4064 }
4065
4066 if (s2io_link_test(sp, &data[2]))
4067 ethtest->flags |= ETH_TEST_FL_FAILED;
4068
4069 data[0] = 0;
4070 data[1] = 0;
4071 data[3] = 0;
4072 data[4] = 0;
4073 }
4074}
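/*
 * A minimal sketch for readability (hypothetical names, not defined in
 * s2io.h): which self-test above fills which slot of the data[] array
 * that ethtool reports back to the user.
 */
enum s2io_selftest_slot {
	S2IO_SELFTEST_REGISTER	= 0,	/* s2io_register_test, offline */
	S2IO_SELFTEST_EEPROM	= 1,	/* s2io_eeprom_test, offline   */
	S2IO_SELFTEST_LINK	= 2,	/* s2io_link_test, online      */
	S2IO_SELFTEST_RLDRAM	= 3,	/* s2io_rldram_test, offline   */
	S2IO_SELFTEST_BIST	= 4,	/* s2io_bist_test, offline     */
};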
4075
4076static void s2io_get_ethtool_stats(struct net_device *dev,
4077 struct ethtool_stats *estats,
4078 u64 * tmp_stats)
4079{
4080 int i = 0;
4081 nic_t *sp = dev->priv;
4082 StatInfo_t *stat_info = sp->mac_control.stats_info;
4083
7ba013ac 4084 s2io_updt_stats(sp);
1da177e4
LT
4085 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4086 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4087 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4088 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4089 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4090 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4091 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4092 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4093 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4094 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4095 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4096 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4097 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4098 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4099 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4100 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4101 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4102 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4103 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4104 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4105 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4106 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4107 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4108 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4109 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4110 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4111 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4112 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4113 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4115 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4116 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4117 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4118 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4119 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4120 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4121 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac
K
4124 tmp_stats[i++] = 0;
4125 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4126 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4127}
4128
20346722 4129int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4130{
4131 return (XENA_REG_SPACE);
4132}
4133
4134
20346722 4135u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4136{
4137 nic_t *sp = dev->priv;
4138
4139 return (sp->rx_csum);
4140}
20346722 4141int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4142{
4143 nic_t *sp = dev->priv;
4144
4145 if (data)
4146 sp->rx_csum = 1;
4147 else
4148 sp->rx_csum = 0;
4149
4150 return 0;
4151}
20346722 4152int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4153{
4154 return (XENA_EEPROM_SPACE);
4155}
4156
20346722 4157int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4158{
4159 return (S2IO_TEST_LEN);
4160}
20346722
K
4161void s2io_ethtool_get_strings(struct net_device *dev,
4162 u32 stringset, u8 * data)
1da177e4
LT
4163{
4164 switch (stringset) {
4165 case ETH_SS_TEST:
4166 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4167 break;
4168 case ETH_SS_STATS:
4169 memcpy(data, &ethtool_stats_keys,
4170 sizeof(ethtool_stats_keys));
4171 }
4172}
1da177e4
LT
4173static int s2io_ethtool_get_stats_count(struct net_device *dev)
4174{
4175 return (S2IO_STAT_LEN);
4176}
4177
20346722 4178int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4179{
4180 if (data)
4181 dev->features |= NETIF_F_IP_CSUM;
4182 else
4183 dev->features &= ~NETIF_F_IP_CSUM;
4184
4185 return 0;
4186}
4187
4188
4189static struct ethtool_ops netdev_ethtool_ops = {
4190 .get_settings = s2io_ethtool_gset,
4191 .set_settings = s2io_ethtool_sset,
4192 .get_drvinfo = s2io_ethtool_gdrvinfo,
4193 .get_regs_len = s2io_ethtool_get_regs_len,
4194 .get_regs = s2io_ethtool_gregs,
4195 .get_link = ethtool_op_get_link,
4196 .get_eeprom_len = s2io_get_eeprom_len,
4197 .get_eeprom = s2io_ethtool_geeprom,
4198 .set_eeprom = s2io_ethtool_seeprom,
4199 .get_pauseparam = s2io_ethtool_getpause_data,
4200 .set_pauseparam = s2io_ethtool_setpause_data,
4201 .get_rx_csum = s2io_ethtool_get_rx_csum,
4202 .set_rx_csum = s2io_ethtool_set_rx_csum,
4203 .get_tx_csum = ethtool_op_get_tx_csum,
4204 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4205 .get_sg = ethtool_op_get_sg,
4206 .set_sg = ethtool_op_set_sg,
4207#ifdef NETIF_F_TSO
4208 .get_tso = ethtool_op_get_tso,
4209 .set_tso = ethtool_op_set_tso,
4210#endif
4211 .self_test_count = s2io_ethtool_self_test_count,
4212 .self_test = s2io_ethtool_test,
4213 .get_strings = s2io_ethtool_get_strings,
4214 .phys_id = s2io_ethtool_idnic,
4215 .get_stats_count = s2io_ethtool_get_stats_count,
4216 .get_ethtool_stats = s2io_get_ethtool_stats
4217};
4218
4219/**
20346722 4220 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
4221 * @dev : Device pointer.
4222 * @ifr : An IOCTL specific structure, that can contain a pointer to
4223 * a proprietary structure used to pass information to the driver.
4224 * @cmd : This is used to distinguish between the different commands that
4225 * can be passed to the IOCTL functions.
4226 * Description:
20346722
K
4227 * Currently there is no special functionality supported in IOCTL, hence the
4228 * function always returns -EOPNOTSUPP.
1da177e4
LT
4229 */
4230
20346722 4231int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
4232{
4233 return -EOPNOTSUPP;
4234}
4235
4236/**
4237 * s2io_change_mtu - entry point to change MTU size for the device.
4238 * @dev : device pointer.
4239 * @new_mtu : the new MTU size for the device.
4240 * Description: A driver entry point to change MTU size for the device.
4241 * If the device is running, it is brought down and back up with the new MTU.
4242 * Return value:
4243 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4244 * file on failure.
4245 */
4246
20346722 4247int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4
LT
4248{
4249 nic_t *sp = dev->priv;
1da177e4
LT
4250
4251 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4252 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4253 dev->name);
4254 return -EPERM;
4255 }
4256
1da177e4 4257 dev->mtu = new_mtu;
d8892c6e
K
4258 if (netif_running(dev)) {
4259 s2io_card_down(sp);
4260 netif_stop_queue(dev);
4261 if (s2io_card_up(sp)) {
4262 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4263 __FUNCTION__);
4264 }
4265 if (netif_queue_stopped(dev))
4266 netif_wake_queue(dev);
4267 } else { /* Device is down */
4268 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4269 u64 val64 = new_mtu;
4270
4271 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4272 }
1da177e4
LT
4273
4274 return 0;
4275}
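/*
 * A minimal sketch of the register encoding used above (an assumption,
 * based on the usual s2io.h definition of vBIT(val, loc, sz) as
 * ((u64)(val)) << (64 - (loc) - (sz))): the new MTU is placed into the
 * 14-bit payload-length field that starts at bit offset 2 of the
 * big-endian rmac_max_pyld_len register. The helper name below is
 * hypothetical and exists only for illustration.
 */
static inline u64 s2io_mtu_to_rmac_max_pyld_len(int new_mtu)
{
	/* equivalent to vBIT(new_mtu, 2, 14) under the assumption above */
	return ((u64) new_mtu) << (64 - 2 - 14);
}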
4276
4277/**
4278 * s2io_tasklet - Bottom half of the ISR.
4279 * @dev_adr : address of the device structure in dma_addr_t format.
4280 * Description:
4281 * This is the tasklet or the bottom half of the ISR. This is
20346722 4282 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4283 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4284 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
4285 * replenish the Rx buffers in the Rx buffer descriptors.
4286 * Return value:
4287 * void.
4288 */
4289
4290static void s2io_tasklet(unsigned long dev_addr)
4291{
4292 struct net_device *dev = (struct net_device *) dev_addr;
4293 nic_t *sp = dev->priv;
4294 int i, ret;
4295 mac_info_t *mac_control;
4296 struct config_param *config;
4297
4298 mac_control = &sp->mac_control;
4299 config = &sp->config;
4300
4301 if (!TASKLET_IN_USE) {
4302 for (i = 0; i < config->rx_ring_num; i++) {
4303 ret = fill_rx_buffers(sp, i);
4304 if (ret == -ENOMEM) {
4305 DBG_PRINT(ERR_DBG, "%s: Out of ",
4306 dev->name);
4307 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4308 break;
4309 } else if (ret == -EFILL) {
4310 DBG_PRINT(ERR_DBG,
4311 "%s: Rx Ring %d is full\n",
4312 dev->name, i);
4313 break;
4314 }
4315 }
4316 clear_bit(0, (&sp->tasklet_status));
4317 }
4318}
4319
4320/**
4321 * s2io_set_link - Set the link status
4322 * @data: long pointer to device private structure
4323 * Description: Sets the link status for the adapter
4324 */
4325
4326static void s2io_set_link(unsigned long data)
4327{
4328 nic_t *nic = (nic_t *) data;
4329 struct net_device *dev = nic->dev;
4330 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4331 register u64 val64;
4332 u16 subid;
4333
4334 if (test_and_set_bit(0, &(nic->link_state))) {
4335 /* The card is being reset, no point doing anything */
4336 return;
4337 }
4338
4339 subid = nic->pdev->subsystem_device;
20346722
K
4340 /*
4341 * Allow a small delay for the NIC's self-initiated
1da177e4
LT
4342 * cleanup to complete.
4343 */
4344 msleep(100);
4345
4346 val64 = readq(&bar0->adapter_status);
20346722 4347 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
4348 if (LINK_IS_UP(val64)) {
4349 val64 = readq(&bar0->adapter_control);
4350 val64 |= ADAPTER_CNTL_EN;
4351 writeq(val64, &bar0->adapter_control);
4352 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4353 val64 = readq(&bar0->gpio_control);
4354 val64 |= GPIO_CTRL_GPIO_0;
4355 writeq(val64, &bar0->gpio_control);
4356 val64 = readq(&bar0->gpio_control);
4357 } else {
4358 val64 |= ADAPTER_LED_ON;
4359 writeq(val64, &bar0->adapter_control);
4360 }
4361 val64 = readq(&bar0->adapter_status);
4362 if (!LINK_IS_UP(val64)) {
4363 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4364 DBG_PRINT(ERR_DBG, " Link down");
4365 DBG_PRINT(ERR_DBG, "after ");
4366 DBG_PRINT(ERR_DBG, "enabling ");
4367 DBG_PRINT(ERR_DBG, "device \n");
4368 }
4369 if (nic->device_enabled_once == FALSE) {
4370 nic->device_enabled_once = TRUE;
4371 }
4372 s2io_link(nic, LINK_UP);
4373 } else {
4374 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4375 val64 = readq(&bar0->gpio_control);
4376 val64 &= ~GPIO_CTRL_GPIO_0;
4377 writeq(val64, &bar0->gpio_control);
4378 val64 = readq(&bar0->gpio_control);
4379 }
4380 s2io_link(nic, LINK_DOWN);
4381 }
4382 } else { /* NIC is not Quiescent. */
4383 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4384 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4385 netif_stop_queue(dev);
4386 }
4387 clear_bit(0, &(nic->link_state));
4388}
4389
4390static void s2io_card_down(nic_t * sp)
4391{
4392 int cnt = 0;
4393 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4394 unsigned long flags;
4395 register u64 val64 = 0;
4396
4397 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4398 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4399 msleep(50);
20346722 4400 }
1da177e4
LT
4401 atomic_set(&sp->card_state, CARD_DOWN);
4402
4403 /* disable Tx and Rx traffic on the NIC */
4404 stop_nic(sp);
4405
4406 /* Kill tasklet. */
4407 tasklet_kill(&sp->task);
4408
4409 /* Check if the device is Quiescent and then Reset the NIC */
4410 do {
4411 val64 = readq(&bar0->adapter_status);
20346722 4412 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
1da177e4
LT
4413 break;
4414 }
4415
4416 msleep(50);
4417 cnt++;
4418 if (cnt == 10) {
4419 DBG_PRINT(ERR_DBG,
4420 "s2io_close:Device not Quiescent ");
4421 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4422 (unsigned long long) val64);
4423 break;
4424 }
4425 } while (1);
1da177e4
LT
4426 s2io_reset(sp);
4427
7ba013ac
K
4428 /* Waiting till all Interrupt handlers are complete */
4429 cnt = 0;
4430 do {
4431 msleep(10);
4432 if (!atomic_read(&sp->isr_cnt))
4433 break;
4434 cnt++;
4435 } while(cnt < 5);
4436
4437 spin_lock_irqsave(&sp->tx_lock, flags);
4438 /* Free all Tx buffers */
1da177e4 4439 free_tx_buffers(sp);
7ba013ac
K
4440 spin_unlock_irqrestore(&sp->tx_lock, flags);
4441
4442 /* Free all Rx buffers */
4443 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4444 free_rx_buffers(sp);
7ba013ac 4445 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4446
1da177e4
LT
4447 clear_bit(0, &(sp->link_state));
4448}
4449
4450static int s2io_card_up(nic_t * sp)
4451{
4452 int i, ret;
4453 mac_info_t *mac_control;
4454 struct config_param *config;
4455 struct net_device *dev = (struct net_device *) sp->dev;
4456
4457 /* Initialize the H/W I/O registers */
4458 if (init_nic(sp) != 0) {
4459 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4460 dev->name);
4461 return -ENODEV;
4462 }
4463
20346722
K
4464 /*
4465 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
4466 * Rx ring and initializing buffers into 30 Rx blocks
4467 */
4468 mac_control = &sp->mac_control;
4469 config = &sp->config;
4470
4471 for (i = 0; i < config->rx_ring_num; i++) {
4472 if ((ret = fill_rx_buffers(sp, i))) {
4473 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4474 dev->name);
4475 s2io_reset(sp);
4476 free_rx_buffers(sp);
4477 return -ENOMEM;
4478 }
4479 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4480 atomic_read(&sp->rx_bufs_left[i]));
4481 }
4482
4483 /* Setting its receive mode */
4484 s2io_set_multicast(dev);
4485
4486 /* Enable tasklet for the device */
4487 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4488
4489 /* Enable Rx Traffic and interrupts on the NIC */
4490 if (start_nic(sp)) {
4491 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4492 tasklet_kill(&sp->task);
4493 s2io_reset(sp);
4494 free_irq(dev->irq, dev);
4495 free_rx_buffers(sp);
4496 return -ENODEV;
4497 }
4498
4499 atomic_set(&sp->card_state, CARD_UP);
4500 return 0;
4501}
4502
20346722 4503/**
1da177e4
LT
4504 * s2io_restart_nic - Resets the NIC.
4505 * @data : long pointer to the device private structure
4506 * Description:
4507 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4508 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
4509 * the run time of the watch dog routine which is run holding a
4510 * spin lock.
4511 */
4512
4513static void s2io_restart_nic(unsigned long data)
4514{
4515 struct net_device *dev = (struct net_device *) data;
4516 nic_t *sp = dev->priv;
4517
4518 s2io_card_down(sp);
4519 if (s2io_card_up(sp)) {
4520 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4521 dev->name);
4522 }
4523 netif_wake_queue(dev);
4524 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4525 dev->name);
20346722 4526
1da177e4
LT
4527}
4528
20346722
K
4529/**
4530 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
4531 * @dev : Pointer to net device structure
4532 * Description:
4533 * This function is triggered if the Tx Queue is stopped
4534 * for a pre-defined amount of time when the Interface is still up.
4535 * If the Interface is jammed in such a situation, the hardware is
4536 * reset (by s2io_close) and restarted again (by s2io_open) to
4537 * overcome any problem that might have been caused in the hardware.
4538 * Return value:
4539 * void
4540 */
4541
4542static void s2io_tx_watchdog(struct net_device *dev)
4543{
4544 nic_t *sp = dev->priv;
4545
4546 if (netif_carrier_ok(dev)) {
4547 schedule_work(&sp->rst_timer_task);
4548 }
4549}
4550
4551/**
4552 * rx_osm_handler - To perform some OS related operations on SKB.
4553 * @sp: private member of the device structure,pointer to s2io_nic structure.
4554 * @skb : the socket buffer pointer.
4555 * @len : length of the packet
4556 * @cksum : FCS checksum of the frame.
4557 * @ring_no : the ring from which this RxD was extracted.
20346722 4558 * Description:
1da177e4
LT
4559 * This function is called by the Rx interrupt service routine to perform
4560 * some OS related operations on the SKB before passing it to the upper
4561 * layers. It mainly checks if the checksum is OK, if so adds it to the
4562 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4563 * to the upper layer. If the checksum is wrong, it increments the Rx
4564 * packet error count, frees the SKB and returns error.
4565 * Return value:
4566 * SUCCESS on success and -1 on failure.
4567 */
20346722 4568static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 4569{
20346722 4570 nic_t *sp = ring_data->nic;
1da177e4 4571 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
4572 struct sk_buff *skb = (struct sk_buff *)
4573 ((unsigned long) rxdp->Host_Control);
4574 int ring_no = ring_data->ring_no;
1da177e4
LT
4575 u16 l3_csum, l4_csum;
4576#ifdef CONFIG_2BUFF_MODE
20346722
K
4577 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4578 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4579 int get_block = ring_data->rx_curr_get_info.block_index;
4580 int get_off = ring_data->rx_curr_get_info.offset;
4581 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 4582 unsigned char *buff;
20346722
K
4583#else
4584 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
1da177e4 4585#endif
20346722
K
4586 skb->dev = dev;
4587 if (rxdp->Control_1 & RXD_T_CODE) {
4588 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4589 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4590 dev->name, err);
1ddc50d4
K
4591 dev_kfree_skb(skb);
4592 sp->stats.rx_crc_errors++;
4593 atomic_dec(&sp->rx_bufs_left[ring_no]);
4594 rxdp->Host_Control = 0;
4595 return 0;
20346722 4596 }
1da177e4 4597
20346722
K
4598 /* Updating statistics */
4599 rxdp->Host_Control = 0;
4600 sp->rx_pkt_count++;
4601 sp->stats.rx_packets++;
4602#ifndef CONFIG_2BUFF_MODE
4603 sp->stats.rx_bytes += len;
4604#else
4605 sp->stats.rx_bytes += buf0_len + buf2_len;
4606#endif
4607
4608#ifndef CONFIG_2BUFF_MODE
4609 skb_put(skb, len);
4610#else
4611 buff = skb_push(skb, buf0_len);
4612 memcpy(buff, ba->ba_0, buf0_len);
4613 skb_put(skb, buf2_len);
4614#endif
4615
4616 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4617 (sp->rx_csum)) {
4618 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
4619 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4620 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 4621 /*
1da177e4
LT
4622 * NIC verifies if the Checksum of the received
4623 * frame is Ok or not and accordingly returns
4624 * a flag in the RxD.
4625 */
4626 skb->ip_summed = CHECKSUM_UNNECESSARY;
4627 } else {
20346722
K
4628 /*
4629 * Packet with erroneous checksum, let the
1da177e4
LT
4630 * upper layers deal with it.
4631 */
4632 skb->ip_summed = CHECKSUM_NONE;
4633 }
4634 } else {
4635 skb->ip_summed = CHECKSUM_NONE;
4636 }
4637
1da177e4 4638 skb->protocol = eth_type_trans(skb, dev);
1da177e4
LT
4639#ifdef CONFIG_S2IO_NAPI
4640 netif_receive_skb(skb);
4641#else
4642 netif_rx(skb);
4643#endif
1da177e4 4644 dev->last_rx = jiffies;
1da177e4 4645 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
4646 return SUCCESS;
4647}
4648
4649/**
4650 * s2io_link - stops/starts the Tx queue.
4651 * @sp : private member of the device structure, which is a pointer to the
4652 * s2io_nic structure.
4653 * @link : indicates whether link is UP/DOWN.
4654 * Description:
4655 * This function stops/starts the Tx queue depending on whether the link
20346722
K
4656 * status of the NIC is down or up. This is called by the Alarm
4657 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
4658 * Return value:
4659 * void.
4660 */
4661
20346722 4662void s2io_link(nic_t * sp, int link)
1da177e4
LT
4663{
4664 struct net_device *dev = (struct net_device *) sp->dev;
4665
4666 if (link != sp->last_link_state) {
4667 if (link == LINK_DOWN) {
4668 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4669 netif_carrier_off(dev);
4670 } else {
4671 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4672 netif_carrier_on(dev);
4673 }
4674 }
4675 sp->last_link_state = link;
4676}
4677
4678/**
20346722
K
4679 * get_xena_rev_id - to identify revision ID of xena.
4680 * @pdev : PCI Dev structure
4681 * Description:
4682 * Function to identify the Revision ID of xena.
4683 * Return value:
4684 * returns the revision ID of the device.
4685 */
4686
4687int get_xena_rev_id(struct pci_dev *pdev)
4688{
4689 u8 id = 0;
4690 int ret;
4691 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4692 return id;
4693}
4694
4695/**
4696 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4697 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4698 * s2io_nic structure.
4699 * Description:
4700 * This function initializes a few of the PCI and PCI-X configuration registers
4701 * with recommended values.
4702 * Return value:
4703 * void
4704 */
4705
4706static void s2io_init_pci(nic_t * sp)
4707{
20346722 4708 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
4709
4710 /* Enable Data Parity Error Recovery in PCI-X command register. */
4711 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 4712 &(pcix_cmd));
1da177e4 4713 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 4714 (pcix_cmd | 1));
1da177e4 4715 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 4716 &(pcix_cmd));
1da177e4
LT
4717
4718 /* Set the PErr Response bit in PCI command register. */
4719 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4720 pci_write_config_word(sp->pdev, PCI_COMMAND,
4721 (pci_cmd | PCI_COMMAND_PARITY));
4722 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4723
1da177e4 4724 /* Forcibly disabling relaxed ordering capability of the card. */
20346722 4725 pcix_cmd &= 0xfffd;
1da177e4 4726 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 4727 pcix_cmd);
1da177e4 4728 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 4729 &(pcix_cmd));
1da177e4
LT
4730}
4731
4732MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4733MODULE_LICENSE("GPL");
4734module_param(tx_fifo_num, int, 0);
1da177e4 4735module_param(rx_ring_num, int, 0);
20346722
K
4736module_param_array(tx_fifo_len, uint, NULL, 0);
4737module_param_array(rx_ring_sz, uint, NULL, 0);
20346722 4738module_param_array(rts_frm_len, uint, NULL, 0);
5e25b9dd 4739module_param(use_continuous_tx_intrs, int, 1);
1da177e4
LT
4740module_param(rmac_pause_time, int, 0);
4741module_param(mc_pause_threshold_q0q3, int, 0);
4742module_param(mc_pause_threshold_q4q7, int, 0);
4743module_param(shared_splits, int, 0);
4744module_param(tmac_util_period, int, 0);
4745module_param(rmac_util_period, int, 0);
4746#ifndef CONFIG_S2IO_NAPI
4747module_param(indicate_max_pkts, int, 0);
4748#endif
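/*
 * Illustrative example (not part of the source): with the parameters
 * declared above, loading the module with two Tx FIFOs and two Rx rings
 * might look like
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2 tx_fifo_len=512,512
 *
 * Parameters that are left unspecified keep the defaults that
 * s2io_init_nic() fills in below.
 */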
20346722 4749
1da177e4 4750/**
20346722 4751 * s2io_init_nic - Initialization of the adapter.
1da177e4
LT
4752 * @pdev : structure containing the PCI related information of the device.
4753 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4754 * Description:
4755 * The function initializes an adapter identified by the pci_dev structure.
20346722
K
4756 * All OS related initialization, including memory and device structure and
4757 * initialization of the device private variables, is done. Also the swapper
4758 * control register is initialized to enable read and write into the I/O
1da177e4
LT
4759 * registers of the device.
4760 * Return value:
4761 * returns 0 on success and negative on failure.
4762 */
4763
4764static int __devinit
4765s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4766{
4767 nic_t *sp;
4768 struct net_device *dev;
1da177e4
LT
4769 int i, j, ret;
4770 int dma_flag = FALSE;
4771 u32 mac_up, mac_down;
4772 u64 val64 = 0, tmp64 = 0;
4773 XENA_dev_config_t __iomem *bar0 = NULL;
4774 u16 subid;
4775 mac_info_t *mac_control;
4776 struct config_param *config;
4777
20346722
K
4778#ifdef CONFIG_S2IO_NAPI
4779 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4780#endif
1da177e4
LT
4781
4782 if ((ret = pci_enable_device(pdev))) {
4783 DBG_PRINT(ERR_DBG,
4784 "s2io_init_nic: pci_enable_device failed\n");
4785 return ret;
4786 }
4787
1e7f0bd8 4788 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
4789 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4790 dma_flag = TRUE;
1da177e4 4791 if (pci_set_consistent_dma_mask
1e7f0bd8 4792 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
4793 DBG_PRINT(ERR_DBG,
4794 "Unable to obtain 64bit DMA for \
4795 consistent allocations\n");
4796 pci_disable_device(pdev);
4797 return -ENOMEM;
4798 }
1e7f0bd8 4799 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
4800 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4801 } else {
4802 pci_disable_device(pdev);
4803 return -ENOMEM;
4804 }
4805
4806 if (pci_request_regions(pdev, s2io_driver_name)) {
4807 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4808 pci_disable_device(pdev);
4809 return -ENODEV;
4810 }
4811
4812 dev = alloc_etherdev(sizeof(nic_t));
4813 if (dev == NULL) {
4814 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4815 pci_disable_device(pdev);
4816 pci_release_regions(pdev);
4817 return -ENODEV;
4818 }
4819
4820 pci_set_master(pdev);
4821 pci_set_drvdata(pdev, dev);
4822 SET_MODULE_OWNER(dev);
4823 SET_NETDEV_DEV(dev, &pdev->dev);
4824
4825 /* Private member variable initialized to s2io NIC structure */
4826 sp = dev->priv;
4827 memset(sp, 0, sizeof(nic_t));
4828 sp->dev = dev;
4829 sp->pdev = pdev;
1da177e4 4830 sp->high_dma_flag = dma_flag;
1da177e4 4831 sp->device_enabled_once = FALSE;
1da177e4
LT
4832
4833 /* Initialize some PCI/PCI-X fields of the NIC. */
4834 s2io_init_pci(sp);
4835
20346722 4836 /*
1da177e4 4837 * Setting the device configuration parameters.
20346722
K
4838 * Most of these parameters can be specified by the user during
4839 * module insertion as they are module loadable parameters. If
4840 * these parameters are not not specified during load time, they
1da177e4
LT
4841 * are initialized with default values.
4842 */
4843 mac_control = &sp->mac_control;
4844 config = &sp->config;
4845
4846 /* Tx side parameters. */
4847 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4848 config->tx_fifo_num = tx_fifo_num;
4849 for (i = 0; i < MAX_TX_FIFOS; i++) {
4850 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4851 config->tx_cfg[i].fifo_priority = i;
4852 }
4853
20346722
K
4854 /* mapping the QoS priority to the configured fifos */
4855 for (i = 0; i < MAX_TX_FIFOS; i++)
4856 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4857
1da177e4
LT
4858 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4859 for (i = 0; i < config->tx_fifo_num; i++) {
4860 config->tx_cfg[i].f_no_snoop =
4861 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4862 if (config->tx_cfg[i].fifo_len < 65) {
4863 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4864 break;
4865 }
4866 }
4867 config->max_txds = MAX_SKB_FRAGS;
4868
4869 /* Rx side parameters. */
4870 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4871 config->rx_ring_num = rx_ring_num;
4872 for (i = 0; i < MAX_RX_RINGS; i++) {
4873 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4874 (MAX_RXDS_PER_BLOCK + 1);
4875 config->rx_cfg[i].ring_priority = i;
4876 }
4877
4878 for (i = 0; i < rx_ring_num; i++) {
4879 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4880 config->rx_cfg[i].f_no_snoop =
4881 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4882 }
4883
4884 /* Setting Mac Control parameters */
4885 mac_control->rmac_pause_time = rmac_pause_time;
4886 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4887 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4888
4889
4890 /* Initialize Ring buffer parameters. */
4891 for (i = 0; i < config->rx_ring_num; i++)
4892 atomic_set(&sp->rx_bufs_left[i], 0);
4893
7ba013ac
K
4894 /* Initialize the number of ISRs currently running */
4895 atomic_set(&sp->isr_cnt, 0);
4896
1da177e4
LT
4897 /* initialize the shared memory used by the NIC and the host */
4898 if (init_shared_mem(sp)) {
4899 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4900 dev->name);
4901 ret = -ENOMEM;
4902 goto mem_alloc_failed;
4903 }
4904
4905 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4906 pci_resource_len(pdev, 0));
4907 if (!sp->bar0) {
4908 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4909 dev->name);
4910 ret = -ENOMEM;
4911 goto bar0_remap_failed;
4912 }
4913
4914 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4915 pci_resource_len(pdev, 2));
4916 if (!sp->bar1) {
4917 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4918 dev->name);
4919 ret = -ENOMEM;
4920 goto bar1_remap_failed;
4921 }
4922
4923 dev->irq = pdev->irq;
4924 dev->base_addr = (unsigned long) sp->bar0;
4925
4926 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4927 for (j = 0; j < MAX_TX_FIFOS; j++) {
4928 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4929 (sp->bar1 + (j * 0x00020000));
4930 }
4931
4932 /* Driver entry points */
4933 dev->open = &s2io_open;
4934 dev->stop = &s2io_close;
4935 dev->hard_start_xmit = &s2io_xmit;
4936 dev->get_stats = &s2io_get_stats;
4937 dev->set_multicast_list = &s2io_set_multicast;
4938 dev->do_ioctl = &s2io_ioctl;
4939 dev->change_mtu = &s2io_change_mtu;
4940 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
20346722 4941
1da177e4
LT
4942 /*
4943 * will use eth_mac_addr() for dev->set_mac_address
4944 * mac address will be set every time dev->open() is called
4945 */
20346722 4946#if defined(CONFIG_S2IO_NAPI)
1da177e4 4947 dev->poll = s2io_poll;
20346722 4948 dev->weight = 32;
1da177e4
LT
4949#endif
4950
4951 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4952 if (sp->high_dma_flag == TRUE)
4953 dev->features |= NETIF_F_HIGHDMA;
4954#ifdef NETIF_F_TSO
4955 dev->features |= NETIF_F_TSO;
4956#endif
4957
4958 dev->tx_timeout = &s2io_tx_watchdog;
4959 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4960 INIT_WORK(&sp->rst_timer_task,
4961 (void (*)(void *)) s2io_restart_nic, dev);
4962 INIT_WORK(&sp->set_link_task,
4963 (void (*)(void *)) s2io_set_link, sp);
4964
4965 pci_save_state(sp->pdev);
4966
4967 /* Setting swapper control on the NIC, for proper reset operation */
4968 if (s2io_set_swapper(sp)) {
4969 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4970 dev->name);
4971 ret = -EAGAIN;
4972 goto set_swap_failed;
4973 }
4974
20346722
K
4975 /*
4976 * Fix for all "FFs" MAC address problems observed on
4977 * Alpha platforms
4978 */
1da177e4
LT
4979 fix_mac_address(sp);
4980 s2io_reset(sp);
4981
4982 /*
1da177e4
LT
4983 * MAC address initialization.
4984 * For now only one mac address will be read and used.
4985 */
4986 bar0 = sp->bar0;
4987 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4988 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4989 writeq(val64, &bar0->rmac_addr_cmd_mem);
4990 wait_for_cmd_complete(sp);
4991
4992 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4993 mac_down = (u32) tmp64;
4994 mac_up = (u32) (tmp64 >> 32);
4995
4996 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4997
4998 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4999 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5000 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5001 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5002 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5003 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5004
5005 DBG_PRINT(INIT_DBG,
5006 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5007 sp->def_mac_addr[0].mac_addr[0],
5008 sp->def_mac_addr[0].mac_addr[1],
5009 sp->def_mac_addr[0].mac_addr[2],
5010 sp->def_mac_addr[0].mac_addr[3],
5011 sp->def_mac_addr[0].mac_addr[4],
5012 sp->def_mac_addr[0].mac_addr[5]);
5013
5014 /* Set the factory defined MAC address initially */
5015 dev->addr_len = ETH_ALEN;
5016 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5017
5018 /*
20346722 5019 * Initialize the tasklet status and link state flags
1da177e4
LT
5020 * and the card state parameter
5021 */
5022 atomic_set(&(sp->card_state), 0);
5023 sp->tasklet_status = 0;
5024 sp->link_state = 0;
5025
1da177e4
LT
5026 /* Initialize spinlocks */
5027 spin_lock_init(&sp->tx_lock);
5028#ifndef CONFIG_S2IO_NAPI
5029 spin_lock_init(&sp->put_lock);
5030#endif
7ba013ac 5031 spin_lock_init(&sp->rx_lock);
1da177e4 5032
20346722
K
5033 /*
5034 * SXE-002: Configure link and activity LED to init state
5035 * on driver load.
1da177e4
LT
5036 */
5037 subid = sp->pdev->subsystem_device;
5038 if ((subid & 0xFF) >= 0x07) {
5039 val64 = readq(&bar0->gpio_control);
5040 val64 |= 0x0000800000000000ULL;
5041 writeq(val64, &bar0->gpio_control);
5042 val64 = 0x0411040400000000ULL;
5043 writeq(val64, (void __iomem *) bar0 + 0x2700);
5044 val64 = readq(&bar0->gpio_control);
5045 }
5046
5047 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5048
5049 if (register_netdev(dev)) {
5050 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5051 ret = -ENODEV;
5052 goto register_failed;
5053 }
5054
7ba013ac
K
5055 /* Initialize device name */
5056 strcpy(sp->name, dev->name);
5057 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5058
20346722
K
5059 /*
5060 * Make Link state as off at this point, when the Link change
5061 * interrupt comes the state will be automatically changed to
1da177e4
LT
5062 * the right state.
5063 */
5064 netif_carrier_off(dev);
1da177e4
LT
5065
5066 return 0;
5067
5068 register_failed:
5069 set_swap_failed:
5070 iounmap(sp->bar1);
5071 bar1_remap_failed:
5072 iounmap(sp->bar0);
5073 bar0_remap_failed:
5074 mem_alloc_failed:
5075 free_shared_mem(sp);
5076 pci_disable_device(pdev);
5077 pci_release_regions(pdev);
5078 pci_set_drvdata(pdev, NULL);
5079 free_netdev(dev);
5080
5081 return ret;
5082}
5083
5084/**
20346722 5085 * s2io_rem_nic - Free the PCI device
1da177e4 5086 * @pdev: structure containing the PCI related information of the device.
20346722 5087 * Description: This function is called by the PCI subsystem to release a
1da177e4 5088 * PCI device and free up all resource held up by the device. This could
20346722 5089 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
5090 * from memory.
5091 */
5092
5093static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5094{
5095 struct net_device *dev =
5096 (struct net_device *) pci_get_drvdata(pdev);
5097 nic_t *sp;
5098
5099 if (dev == NULL) {
5100 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5101 return;
5102 }
5103
5104 sp = dev->priv;
5105 unregister_netdev(dev);
5106
5107 free_shared_mem(sp);
5108 iounmap(sp->bar0);
5109 iounmap(sp->bar1);
5110 pci_disable_device(pdev);
5111 pci_release_regions(pdev);
5112 pci_set_drvdata(pdev, NULL);
1da177e4
LT
5113 free_netdev(dev);
5114}
5115
5116/**
5117 * s2io_starter - Entry point for the driver
5118 * Description: This function is the entry point for the driver. It verifies
5119 * the module loadable parameters and initializes PCI configuration space.
5120 */
5121
5122int __init s2io_starter(void)
5123{
5124 return pci_module_init(&s2io_driver);
5125}
5126
5127/**
20346722 5128 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
5129 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5130 */
5131
20346722 5132void s2io_closer(void)
1da177e4
LT
5133{
5134 pci_unregister_driver(&s2io_driver);
5135 DBG_PRINT(INIT_DBG, "cleanup done\n");
5136}
5137
5138module_init(s2io_starter);
5139module_exit(s2io_closer);