[PATCH] S2io: Support for bimodal interrupts
drivers/net/s2io.c
1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all the code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/errno.h>
42#include <linux/ioport.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
58#include <linux/if_vlan.h>
59
60#include <asm/system.h>
61#include <asm/uaccess.h>
62#include <asm/io.h>
63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 1.7.7";
71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79 return ret;
80}
81
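/*
 * Note: RXD_IS_UP2DT() ("RxD is up to date") is true once the adapter has
 * released ownership of the descriptor (RXD_OWN_XENA cleared) and has
 * overwritten the marker field in Control_2, i.e. the RxD has been updated
 * by the hardware and can be processed by the host.
 */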
82/*
83 * Cards with following subsystem_id have a link state indication
84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85 * macro below identifies these cards given the subsystem_id.
86 */
87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
91
92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95#define PANIC 1
96#define LOW 2
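/*
 * rx_buffer_level() below classifies how depleted a receive ring is: LOW
 * once more than 16 of the ring's RxDs are left without buffers, escalating
 * to PANIC when no more than one block's worth (MAX_RXDS_PER_BLOCK) of
 * filled buffers remains, so callers can decide how urgently to refill.
 */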
97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98{
99 int level = 0;
100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104 level = LOW;
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106 level = PANIC;
107 }
108 }
109
110 return level;
111}
112
113/* Ethtool related variables and Macros. */
114static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
120};
121
122static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_frms"},
124 {"tmac_data_octets"},
125 {"tmac_drop_frms"},
126 {"tmac_mcst_frms"},
127 {"tmac_bcst_frms"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
131 {"tmac_vld_ip"},
132 {"tmac_drop_ip"},
133 {"tmac_icmp"},
134 {"tmac_rst_tcp"},
135 {"tmac_tcp"},
136 {"tmac_udp"},
137 {"rmac_vld_frms"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
140 {"rmac_drop_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
144 {"rmac_long_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
149 {"rmac_frag_frms"},
150 {"rmac_jabber_frms"},
151 {"rmac_ip"},
152 {"rmac_ip_octets"},
153 {"rmac_hdr_err_ip"},
154 {"rmac_drop_ip"},
155 {"rmac_icmp"},
156 {"rmac_tcp"},
157 {"rmac_udp"},
158 {"rmac_err_drp_udp"},
159 {"rmac_pause_cnt"},
160 {"rmac_accepted_ip"},
161 {"rmac_err_tcp"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
165};
166
167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
172
173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
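/*
 * Illustrative use of S2IO_TIMER_CONF (handler and argument names here are
 * hypothetical, not taken from this file):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, alarm_handler, sp, (HZ / 2));
 *
 * which initializes the timer, points it at alarm_handler() with 'sp' as
 * its data, and arms it to fire HZ/2 jiffies (half a second) from now.
 */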
179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
202
203/*
204 * Constants to be programmed into the Xena's registers, to configure
205 * the XAUI.
206 */
207
208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
209#define END_SIGN 0x0
210
211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214 /* Write data */
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222 /* Write data */
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224 /* Done */
225 END_SIGN
226};
227
228static u64 xena_mdio_cfg[] = {
229 /* Reset PMA PLL */
230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
231 0xC0010100008000E4ULL,
232 /* Remove Reset from PMA PLL */
233 0xC001010000000000ULL, 0xC0010100000000E0ULL,
234 0xC0010100000000E4ULL,
235 END_SIGN
236};
237
238static u64 xena_dtx_cfg[] = {
239 0x8000051500000000ULL, 0x80000515000000E0ULL,
240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242 0x8002051500000000ULL, 0x80020515000000E0ULL,
243 0x80020515F21000E4ULL,
244 /* Set PADLOOPBACKN */
245 0x8002051500000000ULL, 0x80020515000000E0ULL,
246 0x80020515B20000E4ULL, 0x8003051500000000ULL,
247 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248 0x8004051500000000ULL, 0x80040515000000E0ULL,
249 0x80040515B20000E4ULL, 0x8005051500000000ULL,
250 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
251 SWITCH_SIGN,
252 /* Remove PADLOOPBACKN */
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F20000E4ULL, 0x8003051500000000ULL,
255 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256 0x8004051500000000ULL, 0x80040515000000E0ULL,
257 0x80040515F20000E4ULL, 0x8005051500000000ULL,
258 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
259 END_SIGN
260};
261
262/*
263 * Constants for Fixing the MacAddress problem seen mostly on
264 * Alpha machines.
265 */
266static u64 fix_mac[] = {
267 0x0060000000000000ULL, 0x0060600000000000ULL,
268 0x0040600000000000ULL, 0x0000600000000000ULL,
269 0x0020600000000000ULL, 0x0060600000000000ULL,
270 0x0020600000000000ULL, 0x0060600000000000ULL,
271 0x0020600000000000ULL, 0x0060600000000000ULL,
272 0x0020600000000000ULL, 0x0060600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0000600000000000ULL,
280 0x0040600000000000ULL, 0x0060600000000000ULL,
281 END_SIGN
282};
283
284/* Module Loadable parameters. */
285static unsigned int tx_fifo_num = 1;
286static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288static unsigned int rx_ring_num = 1;
289static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
291static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293static unsigned int use_continuous_tx_intrs = 1;
294static unsigned int rmac_pause_time = 65535;
295static unsigned int mc_pause_threshold_q0q3 = 187;
296static unsigned int mc_pause_threshold_q4q7 = 187;
297static unsigned int shared_splits;
298static unsigned int tmac_util_period = 5;
299static unsigned int rmac_util_period = 5;
300static unsigned int bimodal = 0;
301#ifndef CONFIG_S2IO_NAPI
302static unsigned int indicate_max_pkts;
303#endif
304
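/*
 * A note on the 'bimodal' parameter added by this patch: when it is set,
 * init_nic() below programs one TTI entry per receive ring (command-memory
 * offsets 0x38 + ring) and skips the usual per-ring RTI programming, so
 * receive-side interrupt moderation is driven from the transmit traffic
 * interrupt (TTI) scheme rather than from a separate Rx timer.
 */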
305/*
306 * S2IO device table.
307 * This table lists all the devices that this driver supports.
308 */
309static struct pci_device_id s2io_tbl[] __devinitdata = {
310 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
311 PCI_ANY_ID, PCI_ANY_ID},
312 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
313 PCI_ANY_ID, PCI_ANY_ID},
314 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
315 PCI_ANY_ID, PCI_ANY_ID},
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {0,}
319};
320
321MODULE_DEVICE_TABLE(pci, s2io_tbl);
322
323static struct pci_driver s2io_driver = {
324 .name = "S2IO",
325 .id_table = s2io_tbl,
326 .probe = s2io_init_nic,
327 .remove = __devexit_p(s2io_rem_nic),
328};
329
330/* A simplifier macro used both by init and free shared_mem Fns(). */
331#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
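/*
 * TXD_MEM_PAGE_CNT() is a ceiling division: for example, a FIFO of 100
 * TxD lists with 32 lists fitting in a page needs
 * TXD_MEM_PAGE_CNT(100, 32) = (100 + 31) / 32 = 4 pages.
 */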
332
333/**
334 * init_shared_mem - Allocation and Initialization of Memory
335 * @nic: Device private variable.
336 * Description: The function allocates all the memory areas shared
337 * between the NIC and the driver. This includes Tx descriptors,
338 * Rx descriptors and the statistics block.
339 */
340
341static int init_shared_mem(struct s2io_nic *nic)
342{
343 u32 size;
344 void *tmp_v_addr, *tmp_v_addr_next;
345 dma_addr_t tmp_p_addr, tmp_p_addr_next;
346 RxD_block_t *pre_rxd_blk = NULL;
347 int i, j, blk_cnt, rx_sz, tx_sz;
348 int lst_size, lst_per_page;
349 struct net_device *dev = nic->dev;
350#ifdef CONFIG_2BUFF_MODE
351 u64 tmp;
352 buffAdd_t *ba;
353#endif
354
355 mac_info_t *mac_control;
356 struct config_param *config;
357
358 mac_control = &nic->mac_control;
359 config = &nic->config;
360
361
362 /* Allocation and initialization of TXDLs in FIFOs */
363 size = 0;
364 for (i = 0; i < config->tx_fifo_num; i++) {
365 size += config->tx_cfg[i].fifo_len;
366 }
367 if (size > MAX_AVAILABLE_TXDS) {
368 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
369 dev->name);
370 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
371 DBG_PRINT(ERR_DBG, "that can be used\n");
372 return FAILURE;
373 }
374
375 lst_size = (sizeof(TxD_t) * config->max_txds);
376 tx_sz = lst_size * size;
377 lst_per_page = PAGE_SIZE / lst_size;
378
379 for (i = 0; i < config->tx_fifo_num; i++) {
380 int fifo_len = config->tx_cfg[i].fifo_len;
381 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
382 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
383 GFP_KERNEL);
384 if (!mac_control->fifos[i].list_info) {
385 DBG_PRINT(ERR_DBG,
386 "Malloc failed for list_info\n");
387 return -ENOMEM;
388 }
389 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
390 }
391 for (i = 0; i < config->tx_fifo_num; i++) {
392 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
393 lst_per_page);
394 mac_control->fifos[i].tx_curr_put_info.offset = 0;
395 mac_control->fifos[i].tx_curr_put_info.fifo_len =
396 config->tx_cfg[i].fifo_len - 1;
397 mac_control->fifos[i].tx_curr_get_info.offset = 0;
398 mac_control->fifos[i].tx_curr_get_info.fifo_len =
399 config->tx_cfg[i].fifo_len - 1;
400 mac_control->fifos[i].fifo_no = i;
401 mac_control->fifos[i].nic = nic;
402 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
403
404 for (j = 0; j < page_num; j++) {
405 int k = 0;
406 dma_addr_t tmp_p;
407 void *tmp_v;
408 tmp_v = pci_alloc_consistent(nic->pdev,
409 PAGE_SIZE, &tmp_p);
410 if (!tmp_v) {
411 DBG_PRINT(ERR_DBG,
412 "pci_alloc_consistent ");
413 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
414 return -ENOMEM;
415 }
416 while (k < lst_per_page) {
417 int l = (j * lst_per_page) + k;
418 if (l == config->tx_cfg[i].fifo_len)
419 break;
420 mac_control->fifos[i].list_info[l].list_virt_addr =
421 tmp_v + (k * lst_size);
422 mac_control->fifos[i].list_info[l].list_phy_addr =
423 tmp_p + (k * lst_size);
424 k++;
425 }
426 }
427 }
428
429 /* Allocation and initialization of RXDs in Rings */
430 size = 0;
431 for (i = 0; i < config->rx_ring_num; i++) {
432 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
433 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
434 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
435 i);
436 DBG_PRINT(ERR_DBG, "RxDs per Block");
437 return FAILURE;
438 }
439 size += config->rx_cfg[i].num_rxd;
440 mac_control->rings[i].block_count =
441 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
442 mac_control->rings[i].pkt_cnt =
443 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
444 }
445 size = (size * (sizeof(RxD_t)));
446 rx_sz = size;
447
448 for (i = 0; i < config->rx_ring_num; i++) {
449 mac_control->rings[i].rx_curr_get_info.block_index = 0;
450 mac_control->rings[i].rx_curr_get_info.offset = 0;
451 mac_control->rings[i].rx_curr_get_info.ring_len =
452 config->rx_cfg[i].num_rxd - 1;
453 mac_control->rings[i].rx_curr_put_info.block_index = 0;
454 mac_control->rings[i].rx_curr_put_info.offset = 0;
455 mac_control->rings[i].rx_curr_put_info.ring_len =
456 config->rx_cfg[i].num_rxd - 1;
457 mac_control->rings[i].nic = nic;
458 mac_control->rings[i].ring_no = i;
459
460 blk_cnt =
461 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
462 /* Allocating all the Rx blocks */
463 for (j = 0; j < blk_cnt; j++) {
464#ifndef CONFIG_2BUFF_MODE
465 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
466#else
467 size = SIZE_OF_BLOCK;
468#endif
469 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
470 &tmp_p_addr);
471 if (tmp_v_addr == NULL) {
472 /*
473 * In case of failure, free_shared_mem()
474 * is called, which should free any
475 * memory that was alloced till the
476 * failure happened.
477 */
478 mac_control->rings[i].rx_blocks[j].block_virt_addr =
479 tmp_v_addr;
480 return -ENOMEM;
481 }
482 memset(tmp_v_addr, 0, size);
483 mac_control->rings[i].rx_blocks[j].block_virt_addr =
484 tmp_v_addr;
485 mac_control->rings[i].rx_blocks[j].block_dma_addr =
486 tmp_p_addr;
487 }
488 /* Interlinking all Rx Blocks */
489 for (j = 0; j < blk_cnt; j++) {
490 tmp_v_addr =
491 mac_control->rings[i].rx_blocks[j].block_virt_addr;
492 tmp_v_addr_next =
493 mac_control->rings[i].rx_blocks[(j + 1) %
494 blk_cnt].block_virt_addr;
495 tmp_p_addr =
496 mac_control->rings[i].rx_blocks[j].block_dma_addr;
497 tmp_p_addr_next =
498 mac_control->rings[i].rx_blocks[(j + 1) %
499 blk_cnt].block_dma_addr;
500
501 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
502 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
503 * marker.
504 */
505#ifndef CONFIG_2BUFF_MODE
506 pre_rxd_blk->reserved_2_pNext_RxD_block =
507 (unsigned long) tmp_v_addr_next;
508#endif
509 pre_rxd_blk->pNext_RxD_Blk_physical =
510 (u64) tmp_p_addr_next;
511 }
512 }
513
514#ifdef CONFIG_2BUFF_MODE
515 /*
516 * Allocation of Storages for buffer addresses in 2BUFF mode
517 * and the buffers as well.
518 */
519 for (i = 0; i < config->rx_ring_num; i++) {
520 blk_cnt =
521 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
522 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
523 GFP_KERNEL);
524 if (!mac_control->rings[i].ba)
525 return -ENOMEM;
526 for (j = 0; j < blk_cnt; j++) {
527 int k = 0;
528 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
529 (MAX_RXDS_PER_BLOCK + 1)),
530 GFP_KERNEL);
531 if (!mac_control->rings[i].ba[j])
532 return -ENOMEM;
533 while (k != MAX_RXDS_PER_BLOCK) {
534 ba = &mac_control->rings[i].ba[j][k];
535
536 ba->ba_0_org = (void *) kmalloc
537 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
538 if (!ba->ba_0_org)
539 return -ENOMEM;
540 tmp = (u64) ba->ba_0_org;
541 tmp += ALIGN_SIZE;
542 tmp &= ~((u64) ALIGN_SIZE);
543 ba->ba_0 = (void *) tmp;
544
545 ba->ba_1_org = (void *) kmalloc
546 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
547 if (!ba->ba_1_org)
548 return -ENOMEM;
549 tmp = (u64) ba->ba_1_org;
550 tmp += ALIGN_SIZE;
551 tmp &= ~((u64) ALIGN_SIZE);
552 ba->ba_1 = (void *) tmp;
553 k++;
554 }
555 }
556 }
557#endif
558
559 /* Allocation and initialization of Statistics block */
560 size = sizeof(StatInfo_t);
561 mac_control->stats_mem = pci_alloc_consistent
562 (nic->pdev, size, &mac_control->stats_mem_phy);
563
564 if (!mac_control->stats_mem) {
565 /*
566 * In case of failure, free_shared_mem() is called, which
567 * should free any memory that was alloced till the
568 * failure happened.
569 */
570 return -ENOMEM;
571 }
572 mac_control->stats_mem_sz = size;
573
574 tmp_v_addr = mac_control->stats_mem;
575 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
576 memset(tmp_v_addr, 0, size);
577 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
578 (unsigned long long) tmp_p_addr);
579
580 return SUCCESS;
581}
582
583/**
584 * free_shared_mem - Free the allocated Memory
585 * @nic: Device private variable.
586 * Description: This function is to free all memory locations allocated by
587 * the init_shared_mem() function and return it to the kernel.
588 */
589
590static void free_shared_mem(struct s2io_nic *nic)
591{
592 int i, j, blk_cnt, size;
593 void *tmp_v_addr;
594 dma_addr_t tmp_p_addr;
595 mac_info_t *mac_control;
596 struct config_param *config;
597 int lst_size, lst_per_page;
598
599
600 if (!nic)
601 return;
602
603 mac_control = &nic->mac_control;
604 config = &nic->config;
605
606 lst_size = (sizeof(TxD_t) * config->max_txds);
607 lst_per_page = PAGE_SIZE / lst_size;
608
609 for (i = 0; i < config->tx_fifo_num; i++) {
610 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
611 lst_per_page);
612 for (j = 0; j < page_num; j++) {
613 int mem_blks = (j * lst_per_page);
614 if (!mac_control->fifos[i].list_info[mem_blks].
615 list_virt_addr)
616 break;
617 pci_free_consistent(nic->pdev, PAGE_SIZE,
618 mac_control->fifos[i].
619 list_info[mem_blks].
620 list_virt_addr,
621 mac_control->fifos[i].
622 list_info[mem_blks].
623 list_phy_addr);
624 }
625 kfree(mac_control->fifos[i].list_info);
626 }
627
628#ifndef CONFIG_2BUFF_MODE
629 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
630#else
631 size = SIZE_OF_BLOCK;
632#endif
633 for (i = 0; i < config->rx_ring_num; i++) {
634 blk_cnt = mac_control->rings[i].block_count;
635 for (j = 0; j < blk_cnt; j++) {
636 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
637 block_virt_addr;
638 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
639 block_dma_addr;
640 if (tmp_v_addr == NULL)
641 break;
642 pci_free_consistent(nic->pdev, size,
643 tmp_v_addr, tmp_p_addr);
644 }
645 }
646
647#ifdef CONFIG_2BUFF_MODE
648 /* Freeing buffer storage addresses in 2BUFF mode. */
649 for (i = 0; i < config->rx_ring_num; i++) {
650 blk_cnt =
651 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
652 for (j = 0; j < blk_cnt; j++) {
653 int k = 0;
654 if (!mac_control->rings[i].ba[j])
655 continue;
656 while (k != MAX_RXDS_PER_BLOCK) {
657 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
658 kfree(ba->ba_0_org);
659 kfree(ba->ba_1_org);
660 k++;
661 }
662 kfree(mac_control->rings[i].ba[j]);
663 }
664 if (mac_control->rings[i].ba)
665 kfree(mac_control->rings[i].ba);
666 }
667#endif
668
669 if (mac_control->stats_mem) {
670 pci_free_consistent(nic->pdev,
671 mac_control->stats_mem_sz,
672 mac_control->stats_mem,
673 mac_control->stats_mem_phy);
674 }
675}
676
677/**
678 * s2io_verify_pci_mode -
679 */
680
681static int s2io_verify_pci_mode(nic_t *nic)
682{
683 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
684 register u64 val64 = 0;
685 int mode;
686
687 val64 = readq(&bar0->pci_mode);
688 mode = (u8)GET_PCI_MODE(val64);
689
690 if ( val64 & PCI_MODE_UNKNOWN_MODE)
691 return -1; /* Unknown PCI mode */
692 return mode;
693}
694
695
696/**
697 * s2io_print_pci_mode -
698 */
699static int s2io_print_pci_mode(nic_t *nic)
700{
701 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
702 register u64 val64 = 0;
703 int mode;
704 struct config_param *config = &nic->config;
705
706 val64 = readq(&bar0->pci_mode);
707 mode = (u8)GET_PCI_MODE(val64);
708
709 if ( val64 & PCI_MODE_UNKNOWN_MODE)
710 return -1; /* Unknown PCI mode */
711
712 if (val64 & PCI_MODE_32_BITS) {
713 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
714 } else {
715 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
716 }
717
718 switch(mode) {
719 case PCI_MODE_PCI_33:
720 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
721 config->bus_speed = 33;
722 break;
723 case PCI_MODE_PCI_66:
724 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
725 config->bus_speed = 133;
726 break;
727 case PCI_MODE_PCIX_M1_66:
728 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
729 config->bus_speed = 133; /* Herc doubles the clock rate */
730 break;
731 case PCI_MODE_PCIX_M1_100:
732 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
733 config->bus_speed = 200;
734 break;
735 case PCI_MODE_PCIX_M1_133:
736 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
737 config->bus_speed = 266;
738 break;
739 case PCI_MODE_PCIX_M2_66:
740 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
741 config->bus_speed = 133;
742 break;
743 case PCI_MODE_PCIX_M2_100:
744 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
745 config->bus_speed = 200;
746 break;
747 case PCI_MODE_PCIX_M2_133:
748 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
749 config->bus_speed = 266;
750 break;
751 default:
752 return -1; /* Unsupported bus speed */
753 }
754
755 return mode;
756}
757
758/**
759 * init_nic - Initialization of hardware
760 * @nic: device private variable
761 * Description: The function sequentially configures every block
762 * of the H/W from their reset values.
763 * Return Value: SUCCESS on success and
764 * '-1' on failure (endian settings incorrect).
765 */
766
767static int init_nic(struct s2io_nic *nic)
768{
769 XENA_dev_config_t __iomem *bar0 = nic->bar0;
770 struct net_device *dev = nic->dev;
771 register u64 val64 = 0;
772 void __iomem *add;
773 u32 time;
774 int i, j;
775 mac_info_t *mac_control;
776 struct config_param *config;
777 int mdio_cnt = 0, dtx_cnt = 0;
778 unsigned long long mem_share;
779 int mem_size;
780
781 mac_control = &nic->mac_control;
782 config = &nic->config;
783
784 /* to set the swapper control on the card */
785 if(s2io_set_swapper(nic)) {
786 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
787 return -1;
788 }
789
790 /*
791 * Herc requires EOI to be removed from reset before XGXS, so..
792 */
793 if (nic->device_type & XFRAME_II_DEVICE) {
794 val64 = 0xA500000000ULL;
795 writeq(val64, &bar0->sw_reset);
796 msleep(500);
797 val64 = readq(&bar0->sw_reset);
798 }
799
800 /* Remove XGXS from reset state */
801 val64 = 0;
802 writeq(val64, &bar0->sw_reset);
803 msleep(500);
804 val64 = readq(&bar0->sw_reset);
805
806 /* Enable Receiving broadcasts */
807 add = &bar0->mac_cfg;
808 val64 = readq(&bar0->mac_cfg);
809 val64 |= MAC_RMAC_BCAST_ENABLE;
810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
811 writel((u32) val64, add);
812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813 writel((u32) (val64 >> 32), (add + 4));
814
815 /* Read registers in all blocks */
816 val64 = readq(&bar0->mac_int_mask);
817 val64 = readq(&bar0->mc_int_mask);
818 val64 = readq(&bar0->xgxs_int_mask);
819
820 /* Set MTU */
821 val64 = dev->mtu;
822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
823
824 /*
825 * Configuring the XAUI Interface of Xena.
826 * ***************************************
827 * To Configure the Xena's XAUI, one has to write a series
828 * of 64 bit values into two registers in a particular
829 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
830 * which will be defined in the array of configuration values
831 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
832 * to switch writing from one register to another. We continue
833 * writing these values until we encounter the 'END_SIGN' macro.
834 * For example, After making a series of 21 writes into
835 * dtx_control register the 'SWITCH_SIGN' appears and hence we
836 * start writing into mdio_control until we encounter END_SIGN.
837 */
838 if (nic->device_type & XFRAME_II_DEVICE) {
839 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
840 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
841 &bar0->dtx_control, UF);
842 if (dtx_cnt & 0x1)
843 msleep(1); /* Necessary!! */
844 dtx_cnt++;
845 }
846 } else {
847 while (1) {
848 dtx_cfg:
849 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
850 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
851 dtx_cnt++;
852 goto mdio_cfg;
853 }
854 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
855 &bar0->dtx_control, UF);
856 val64 = readq(&bar0->dtx_control);
857 dtx_cnt++;
858 }
859 mdio_cfg:
860 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
861 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
862 mdio_cnt++;
863 goto dtx_cfg;
864 }
865 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
866 &bar0->mdio_control, UF);
867 val64 = readq(&bar0->mdio_control);
868 mdio_cnt++;
869 }
870 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
871 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
872 break;
873 } else {
874 goto dtx_cfg;
875 }
876 }
877 }
878
879 /* Tx DMA Initialization */
880 val64 = 0;
881 writeq(val64, &bar0->tx_fifo_partition_0);
882 writeq(val64, &bar0->tx_fifo_partition_1);
883 writeq(val64, &bar0->tx_fifo_partition_2);
884 writeq(val64, &bar0->tx_fifo_partition_3);
885
886
887 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
888 val64 |=
889 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
890 13) | vBIT(config->tx_cfg[i].fifo_priority,
891 ((i * 32) + 5), 3);
892
893 if (i == (config->tx_fifo_num - 1)) {
894 if (i % 2 == 0)
895 i++;
896 }
897
898 switch (i) {
899 case 1:
900 writeq(val64, &bar0->tx_fifo_partition_0);
901 val64 = 0;
902 break;
903 case 3:
904 writeq(val64, &bar0->tx_fifo_partition_1);
905 val64 = 0;
906 break;
907 case 5:
908 writeq(val64, &bar0->tx_fifo_partition_2);
909 val64 = 0;
910 break;
911 case 7:
912 writeq(val64, &bar0->tx_fifo_partition_3);
913 break;
914 }
915 }
916
917 /* Enable Tx FIFO partition 0. */
918 val64 = readq(&bar0->tx_fifo_partition_0);
919 val64 |= BIT(0); /* To enable the FIFO partition. */
920 writeq(val64, &bar0->tx_fifo_partition_0);
921
922 /*
923 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
924 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
925 */
926 if ((nic->device_type == XFRAME_I_DEVICE) &&
927 (get_xena_rev_id(nic->pdev) < 4))
928 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
929
930 val64 = readq(&bar0->tx_fifo_partition_0);
931 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
932 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
933
934 /*
935 * Initialization of Tx_PA_CONFIG register to ignore packet
936 * integrity checking.
937 */
938 val64 = readq(&bar0->tx_pa_cfg);
939 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
940 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
941 writeq(val64, &bar0->tx_pa_cfg);
942
943 /* Rx DMA initialization. */
944 val64 = 0;
945 for (i = 0; i < config->rx_ring_num; i++) {
946 val64 |=
947 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
948 3);
949 }
950 writeq(val64, &bar0->rx_queue_priority);
951
952 /*
953 * Allocating equal share of memory to all the
954 * configured Rings.
955 */
956 val64 = 0;
957 if (nic->device_type & XFRAME_II_DEVICE)
958 mem_size = 32;
959 else
960 mem_size = 64;
961
962 for (i = 0; i < config->rx_ring_num; i++) {
963 switch (i) {
964 case 0:
965 mem_share = (mem_size / config->rx_ring_num +
966 mem_size % config->rx_ring_num);
967 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
968 continue;
969 case 1:
970 mem_share = (mem_size / config->rx_ring_num);
971 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
972 continue;
973 case 2:
974 mem_share = (mem_size / config->rx_ring_num);
975 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
976 continue;
977 case 3:
978 mem_share = (mem_size / config->rx_ring_num);
979 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
980 continue;
981 case 4:
982 mem_share = (mem_size / config->rx_ring_num);
983 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
984 continue;
985 case 5:
986 mem_share = (mem_size / config->rx_ring_num);
987 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
988 continue;
989 case 6:
990 mem_share = (mem_size / config->rx_ring_num);
991 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
992 continue;
993 case 7:
994 mem_share = (mem_size / config->rx_ring_num);
995 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
996 continue;
997 }
998 }
999 writeq(val64, &bar0->rx_queue_cfg);
1000
1001 /*
1002 * Filling Tx round robin registers
1003 * as per the number of FIFOs
1004 */
1005 switch (config->tx_fifo_num) {
1006 case 1:
1007 val64 = 0x0000000000000000ULL;
1008 writeq(val64, &bar0->tx_w_round_robin_0);
1009 writeq(val64, &bar0->tx_w_round_robin_1);
1010 writeq(val64, &bar0->tx_w_round_robin_2);
1011 writeq(val64, &bar0->tx_w_round_robin_3);
1012 writeq(val64, &bar0->tx_w_round_robin_4);
1013 break;
1014 case 2:
1015 val64 = 0x0000010000010000ULL;
1016 writeq(val64, &bar0->tx_w_round_robin_0);
1017 val64 = 0x0100000100000100ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_1);
1019 val64 = 0x0001000001000001ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_2);
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_3);
1023 val64 = 0x0100000000000000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_4);
1025 break;
1026 case 3:
1027 val64 = 0x0001000102000001ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_0);
1029 val64 = 0x0001020000010001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_1);
1031 val64 = 0x0200000100010200ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_2);
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_3);
1035 val64 = 0x0001020000000000ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_4);
1037 break;
1038 case 4:
1039 val64 = 0x0001020300010200ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_0);
1041 val64 = 0x0100000102030001ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_1);
1043 val64 = 0x0200010000010203ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_2);
1045 val64 = 0x0001020001000001ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_3);
1047 val64 = 0x0203000100000000ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_4);
1049 break;
1050 case 5:
1051 val64 = 0x0001000203000102ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_0);
1053 val64 = 0x0001020001030004ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_1);
1055 val64 = 0x0001000203000102ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_2);
1057 val64 = 0x0001020001030004ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_3);
1059 val64 = 0x0001000000000000ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_4);
1061 break;
1062 case 6:
1063 val64 = 0x0001020304000102ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_0);
1065 val64 = 0x0304050001020001ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_1);
1067 val64 = 0x0203000100000102ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_2);
1069 val64 = 0x0304000102030405ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_3);
1071 val64 = 0x0001000200000000ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1073 break;
1074 case 7:
1075 val64 = 0x0001020001020300ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0102030400010203ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0405060001020001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0304050000010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0102030000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1085 break;
1086 case 8:
1087 val64 = 0x0001020300040105ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0200030106000204ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0103000502010007ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0304010002060500ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0103020400000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1097 break;
1098 }
1099
1100 /* Filling the Rx round robin registers as per the
1101 * number of Rings and steering based on QoS.
1102 */
1103 switch (config->rx_ring_num) {
1104 case 1:
1105 val64 = 0x8080808080808080ULL;
1106 writeq(val64, &bar0->rts_qos_steering);
1107 break;
1108 case 2:
1109 val64 = 0x0000010000010000ULL;
1110 writeq(val64, &bar0->rx_w_round_robin_0);
1111 val64 = 0x0100000100000100ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_1);
1113 val64 = 0x0001000001000001ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_2);
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_3);
1117 val64 = 0x0100000000000000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_4);
1119
1120 val64 = 0x8080808040404040ULL;
1121 writeq(val64, &bar0->rts_qos_steering);
1122 break;
1123 case 3:
1124 val64 = 0x0001000102000001ULL;
1125 writeq(val64, &bar0->rx_w_round_robin_0);
1126 val64 = 0x0001020000010001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_1);
1128 val64 = 0x0200000100010200ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_2);
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_3);
1132 val64 = 0x0001020000000000ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_4);
1134
1135 val64 = 0x8080804040402020ULL;
1136 writeq(val64, &bar0->rts_qos_steering);
1137 break;
1138 case 4:
1139 val64 = 0x0001020300010200ULL;
1140 writeq(val64, &bar0->rx_w_round_robin_0);
1141 val64 = 0x0100000102030001ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_1);
1143 val64 = 0x0200010000010203ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_2);
1145 val64 = 0x0001020001000001ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_3);
1147 val64 = 0x0203000100000000ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_4);
1149
1150 val64 = 0x8080404020201010ULL;
1151 writeq(val64, &bar0->rts_qos_steering);
1152 break;
1153 case 5:
1154 val64 = 0x0001000203000102ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_0);
1156 val64 = 0x0001020001030004ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_1);
1158 val64 = 0x0001000203000102ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_2);
1160 val64 = 0x0001020001030004ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_3);
1162 val64 = 0x0001000000000000ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_4);
1164
1165 val64 = 0x8080404020201008ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1167 break;
1168 case 6:
1169 val64 = 0x0001020304000102ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0304050001020001ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0203000100000102ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0304000102030405ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0001000200000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1179
1180 val64 = 0x8080404020100804ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1182 break;
1183 case 7:
1184 val64 = 0x0001020001020300ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0102030400010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0405060001020001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0304050000010200ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0102030000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1194
1195 val64 = 0x8080402010080402ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1197 break;
1198 case 8:
1199 val64 = 0x0001020300040105ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0200030106000204ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0103000502010007ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0304010002060500ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0103020400000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1209
1210 val64 = 0x8040201008040201ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1212 break;
1213 }
1214
1215 /* UDP Fix */
1216 val64 = 0;
1217 for (i = 0; i < 8; i++)
1218 writeq(val64, &bar0->rts_frm_len_n[i]);
1219
1220 /* Set the default rts frame length for the rings configured */
1221 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1222 for (i = 0 ; i < config->rx_ring_num ; i++)
1223 writeq(val64, &bar0->rts_frm_len_n[i]);
1224
1225 /* Set the frame length for the configured rings
1226 * desired by the user
1227 */
1228 for (i = 0; i < config->rx_ring_num; i++) {
1229 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
1230 * specified frame length steering.
1231 * If the user provides the frame length then program
1232 * the rts_frm_len register for those values or else
1233 * leave it as it is.
1234 */
1235 if (rts_frm_len[i] != 0) {
1236 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1237 &bar0->rts_frm_len_n[i]);
1238 }
1239 }
1240
1241 /* Program statistics memory */
1242 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1243
1244 if (nic->device_type == XFRAME_II_DEVICE) {
1245 val64 = STAT_BC(0x320);
1246 writeq(val64, &bar0->stat_byte_cnt);
1247 }
1248
1249 /*
1250 * Initializing the sampling rate for the device to calculate the
1251 * bandwidth utilization.
1252 */
1253 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1254 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1255 writeq(val64, &bar0->mac_link_util);
1256
1257
1258 /*
1259 * Initializing the Transmit and Receive Traffic Interrupt
1260 * Scheme.
1261 */
1262 /*
1263 * TTI Initialization. Default Tx timer gets us about
1264 * 250 interrupts per sec. Continuous interrupts are enabled
1265 * by default.
1266 */
1267 if (nic->device_type == XFRAME_II_DEVICE) {
1268 int count = (nic->config.bus_speed * 125)/2;
1269 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1270 } else {
1271
1272 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1273 }
1274 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1275 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1276 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1277 if (use_continuous_tx_intrs)
1278 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1279 writeq(val64, &bar0->tti_data1_mem);
1280
1281 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1282 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1283 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1284 writeq(val64, &bar0->tti_data2_mem);
1285
1286 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1287 writeq(val64, &bar0->tti_command_mem);
1288
1289 /*
1290 * Once the operation completes, the Strobe bit of the command
1291 * register will be reset. We poll for this particular condition
1292 * We wait for a maximum of 500ms for the operation to complete,
1293 * if it's not complete by then we return error.
1294 */
1295 time = 0;
1296 while (TRUE) {
1297 val64 = readq(&bar0->tti_command_mem);
1298 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1299 break;
1300 }
1301 if (time > 10) {
1302 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1303 dev->name);
1304 return -1;
1305 }
1306 msleep(50);
1307 time++;
1308 }
1309
1310 if (nic->config.bimodal) {
1311 int k = 0;
1312 for (k = 0; k < config->rx_ring_num; k++) {
1313 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1314 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1315 writeq(val64, &bar0->tti_command_mem);
1316
1317 /*
1318 * Once the operation completes, the Strobe bit of the command
1319 * register will be reset. We poll for this particular condition
1320 * We wait for a maximum of 500ms for the operation to complete,
1321 * if it's not complete by then we return error.
1322 */
1323 time = 0;
1324 while (TRUE) {
1325 val64 = readq(&bar0->tti_command_mem);
1326 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1327 break;
1328 }
1329 if (time > 10) {
1330 DBG_PRINT(ERR_DBG,
1331 "%s: TTI init Failed\n",
1332 dev->name);
1333 return -1;
1334 }
1335 time++;
1336 msleep(50);
1337 }
1338 }
1339 } else {
1340
1341 /* RTI Initialization */
1342 if (nic->device_type == XFRAME_II_DEVICE) {
1343 /*
1344 * Programmed to generate approx. 500 Intrs per
1345 * second
1346 */
1347 int count = (nic->config.bus_speed * 125)/4;
1348 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1349 } else {
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1351 }
1352 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1353 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1354 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1355
1356 writeq(val64, &bar0->rti_data1_mem);
1357
1358 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1359 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1360 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1361 writeq(val64, &bar0->rti_data2_mem);
1362
1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1365 | RTI_CMD_MEM_OFFSET(i);
1366 writeq(val64, &bar0->rti_command_mem);
1367
1368 /*
1369 * Once the operation completes, the Strobe bit of the
1370 * command register will be reset. We poll for this
1371 * particular condition. We wait for a maximum of 500ms
1372 * for the operation to complete, if it's not complete
1373 * by then we return error.
1374 */
1375 time = 0;
1376 while (TRUE) {
1377 val64 = readq(&bar0->rti_command_mem);
1378 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1379 break;
1380 }
1381 if (time > 10) {
1382 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1383 dev->name);
1384 return -1;
1385 }
1386 time++;
1387 msleep(50);
1388 }
1389 }
1390 }
1391
1392 /*
1393 * Initializing proper values as Pause threshold into all
1394 * the 8 Queues on Rx side.
1395 */
1396 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1397 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1398
1399 /* Disable RMAC PAD STRIPPING */
1400 add = (void *) &bar0->mac_cfg;
1401 val64 = readq(&bar0->mac_cfg);
1402 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1404 writel((u32) (val64), add);
1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406 writel((u32) (val64 >> 32), (add + 4));
1407 val64 = readq(&bar0->mac_cfg);
1408
1409 /*
1410 * Set the time value to be inserted in the pause frame
1411 * generated by xena.
1412 */
1413 val64 = readq(&bar0->rmac_pause_cfg);
1414 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1415 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1416 writeq(val64, &bar0->rmac_pause_cfg);
1417
1418 /*
1419 * Set the Threshold Limit for Generating the pause frame
1420 * If the amount of data in any Queue exceeds ratio of
1421 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1422 * a pause frame is generated.
1423 */
1424 val64 = 0;
1425 for (i = 0; i < 4; i++) {
1426 val64 |=
1427 (((u64) 0xFF00 | nic->mac_control.
1428 mc_pause_threshold_q0q3)
1429 << (i * 2 * 8));
1430 }
1431 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1432
1433 val64 = 0;
1434 for (i = 0; i < 4; i++) {
1435 val64 |=
1436 (((u64) 0xFF00 | nic->mac_control.
1437 mc_pause_threshold_q4q7)
1438 << (i * 2 * 8));
1439 }
1440 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1441
1442 /*
1443 * TxDMA will stop Read request if the number of read split has
1444 * exceeded the limit pointed by shared_splits
1445 */
1446 val64 = readq(&bar0->pic_control);
1447 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1448 writeq(val64, &bar0->pic_control);
1449
1450 /*
1451 * Programming the Herc to split every write transaction
1452 * that does not start on an ADB to reduce disconnects.
1453 */
1454 if (nic->device_type == XFRAME_II_DEVICE) {
1455 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1456 writeq(val64, &bar0->wreq_split_mask);
1457 }
1458
1459 return SUCCESS;
1460}
1461
1462/**
1463 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1464 * @nic: device private variable,
1465 * @mask: A mask indicating which Intr block must be modified and,
1466 * @flag: A flag indicating whether to enable or disable the Intrs.
1467 * Description: This function will either disable or enable the interrupts
1468 * depending on the flag argument. The mask argument can be used to
1469 * enable/disable any Intr block.
1470 * Return Value: NONE.
1471 */
1472
1473static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1474{
1475 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1476 register u64 val64 = 0, temp64 = 0;
1477
1478 /* Top level interrupt classification */
1479 /* PIC Interrupts */
1480 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1481 /* Enable PIC Intrs in the general intr mask register */
1482 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1483 if (flag == ENABLE_INTRS) {
1484 temp64 = readq(&bar0->general_int_mask);
1485 temp64 &= ~((u64) val64);
1486 writeq(temp64, &bar0->general_int_mask);
1487 /*
1488 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1489 * interrupts for now.
1490 * TODO
1491 */
1492 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1493 /*
1494 * No MSI Support is available presently, so TTI and
1495 * RTI interrupts are also disabled.
1496 */
1497 } else if (flag == DISABLE_INTRS) {
1498 /*
1499 * Disable PIC Intrs in the general
1500 * intr mask register
1501 */
1502 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1503 temp64 = readq(&bar0->general_int_mask);
1504 val64 |= temp64;
1505 writeq(val64, &bar0->general_int_mask);
1506 }
1507 }
1508
1509 /* DMA Interrupts */
1510 /* Enabling/Disabling Tx DMA interrupts */
1511 if (mask & TX_DMA_INTR) {
1512 /* Enable TxDMA Intrs in the general intr mask register */
1513 val64 = TXDMA_INT_M;
1514 if (flag == ENABLE_INTRS) {
1515 temp64 = readq(&bar0->general_int_mask);
1516 temp64 &= ~((u64) val64);
1517 writeq(temp64, &bar0->general_int_mask);
1518 /*
1519 * Keep all interrupts other than PFC interrupt
1520 * and PCC interrupt disabled in DMA level.
1521 */
1522 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1523 TXDMA_PCC_INT_M);
1524 writeq(val64, &bar0->txdma_int_mask);
1525 /*
1526 * Enable only the MISC error 1 interrupt in PFC block
1527 */
1528 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1529 writeq(val64, &bar0->pfc_err_mask);
1530 /*
1531 * Enable only the FB_ECC error interrupt in PCC block
1532 */
1533 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1534 writeq(val64, &bar0->pcc_err_mask);
1535 } else if (flag == DISABLE_INTRS) {
1536 /*
1537 * Disable TxDMA Intrs in the general intr mask
1538 * register
1539 */
1540 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1541 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1542 temp64 = readq(&bar0->general_int_mask);
1543 val64 |= temp64;
1544 writeq(val64, &bar0->general_int_mask);
1545 }
1546 }
1547
1548 /* Enabling/Disabling Rx DMA interrupts */
1549 if (mask & RX_DMA_INTR) {
1550 /* Enable RxDMA Intrs in the general intr mask register */
1551 val64 = RXDMA_INT_M;
1552 if (flag == ENABLE_INTRS) {
1553 temp64 = readq(&bar0->general_int_mask);
1554 temp64 &= ~((u64) val64);
1555 writeq(temp64, &bar0->general_int_mask);
1556 /*
1557 * All RxDMA block interrupts are disabled for now
1558 * TODO
1559 */
1560 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1561 } else if (flag == DISABLE_INTRS) {
1562 /*
1563 * Disable RxDMA Intrs in the general intr mask
1564 * register
1565 */
1566 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1567 temp64 = readq(&bar0->general_int_mask);
1568 val64 |= temp64;
1569 writeq(val64, &bar0->general_int_mask);
1570 }
1571 }
1572
1573 /* MAC Interrupts */
1574 /* Enabling/Disabling MAC interrupts */
1575 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1576 val64 = TXMAC_INT_M | RXMAC_INT_M;
1577 if (flag == ENABLE_INTRS) {
1578 temp64 = readq(&bar0->general_int_mask);
1579 temp64 &= ~((u64) val64);
1580 writeq(temp64, &bar0->general_int_mask);
1581 /*
1582 * All MAC block error interrupts are disabled for now
1583 * except the link status change interrupt.
1584 * TODO
1585 */
1586 val64 = MAC_INT_STATUS_RMAC_INT;
1587 temp64 = readq(&bar0->mac_int_mask);
1588 temp64 &= ~((u64) val64);
1589 writeq(temp64, &bar0->mac_int_mask);
1590
1591 val64 = readq(&bar0->mac_rmac_err_mask);
1592 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1593 writeq(val64, &bar0->mac_rmac_err_mask);
1594 } else if (flag == DISABLE_INTRS) {
1595 /*
1596 * Disable MAC Intrs in the general intr mask register
1597 */
1598 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1599 writeq(DISABLE_ALL_INTRS,
1600 &bar0->mac_rmac_err_mask);
1601
1602 temp64 = readq(&bar0->general_int_mask);
1603 val64 |= temp64;
1604 writeq(val64, &bar0->general_int_mask);
1605 }
1606 }
1607
1608 /* XGXS Interrupts */
1609 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1610 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1611 if (flag == ENABLE_INTRS) {
1612 temp64 = readq(&bar0->general_int_mask);
1613 temp64 &= ~((u64) val64);
1614 writeq(temp64, &bar0->general_int_mask);
1615 /*
1616 * All XGXS block error interrupts are disabled for now
1617 * TODO
1618 */
1619 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1620 } else if (flag == DISABLE_INTRS) {
1621 /*
1622 * Disable MC Intrs in the general intr mask register
1623 */
1624 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1625 temp64 = readq(&bar0->general_int_mask);
1626 val64 |= temp64;
1627 writeq(val64, &bar0->general_int_mask);
1628 }
1629 }
1630
1631 /* Memory Controller(MC) interrupts */
1632 if (mask & MC_INTR) {
1633 val64 = MC_INT_M;
1634 if (flag == ENABLE_INTRS) {
1635 temp64 = readq(&bar0->general_int_mask);
1636 temp64 &= ~((u64) val64);
1637 writeq(temp64, &bar0->general_int_mask);
1638 /*
1639 * Enable all MC Intrs.
1640 */
1641 writeq(0x0, &bar0->mc_int_mask);
1642 writeq(0x0, &bar0->mc_err_mask);
1643 } else if (flag == DISABLE_INTRS) {
1644 /*
1645 * Disable MC Intrs in the general intr mask register
1646 */
1647 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1648 temp64 = readq(&bar0->general_int_mask);
1649 val64 |= temp64;
1650 writeq(val64, &bar0->general_int_mask);
1651 }
1652 }
1653
1654
1655 /* Tx traffic interrupts */
1656 if (mask & TX_TRAFFIC_INTR) {
1657 val64 = TXTRAFFIC_INT_M;
1658 if (flag == ENABLE_INTRS) {
1659 temp64 = readq(&bar0->general_int_mask);
1660 temp64 &= ~((u64) val64);
1661 writeq(temp64, &bar0->general_int_mask);
1662 /*
1663 * Enable all the Tx side interrupts
1664 * writing 0 Enables all 64 TX interrupt levels
1665 */
1666 writeq(0x0, &bar0->tx_traffic_mask);
1667 } else if (flag == DISABLE_INTRS) {
1668 /*
1669 * Disable Tx Traffic Intrs in the general intr mask
1670 * register.
1671 */
1672 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1673 temp64 = readq(&bar0->general_int_mask);
1674 val64 |= temp64;
1675 writeq(val64, &bar0->general_int_mask);
1676 }
1677 }
1678
1679 /* Rx traffic interrupts */
1680 if (mask & RX_TRAFFIC_INTR) {
1681 val64 = RXTRAFFIC_INT_M;
1682 if (flag == ENABLE_INTRS) {
1683 temp64 = readq(&bar0->general_int_mask);
1684 temp64 &= ~((u64) val64);
1685 writeq(temp64, &bar0->general_int_mask);
1686 /* writing 0 Enables all 8 RX interrupt levels */
1687 writeq(0x0, &bar0->rx_traffic_mask);
1688 } else if (flag == DISABLE_INTRS) {
1689 /*
1690 * Disable Rx Traffic Intrs in the general intr mask
1691 * register.
1692 */
1693 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1694 temp64 = readq(&bar0->general_int_mask);
1695 val64 |= temp64;
1696 writeq(val64, &bar0->general_int_mask);
1697 }
1698 }
1699}
1700
1701static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1702{
1703 int ret = 0;
1704
1705 if (flag == FALSE) {
1706 if ((!herc && (rev_id >= 4)) || herc) {
1707 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1708 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1709 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1710 ret = 1;
1711 }
1712 } else {
1713 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1714 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1715 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1716 ret = 1;
1717 }
1718 }
1719 } else {
541ae68f 1720 if ((!herc && (rev_id >= 4)) || herc) {
1721 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1722 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1723 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1724 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1725 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1726 ret = 1;
1727 }
1728 } else {
1729 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1730 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1731 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1732 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1733 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1734 ret = 1;
1735 }
1736 }
1737 }
1738
1739 return ret;
1740}
 1741/**
 1742 * verify_xena_quiescence - Checks whether the H/W is ready
 1743 * @val64 : Value read from adapter status register.
 1744 * @flag : indicates if the adapter enable bit was ever written once
 1745 * before.
 1746 * Description: Returns whether the H/W is ready to go or not. Depending
 1747 * on whether the adapter enable bit was written or not, the comparison
 1748 * differs and the calling function passes the input argument flag to
 1749 * indicate this.
 1750 * Return: 1 if Xena is quiescent
 1751 * 0 if Xena is not quiescent
 1752 */
1753
20346722 1754static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1755{
541ae68f 1756 int ret = 0, herc;
1da177e4 1757 u64 tmp64 = ~((u64) val64);
5e25b9dd 1758 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1759
541ae68f 1760 herc = (sp->device_type == XFRAME_II_DEVICE);
1761 if (!
1762 (tmp64 &
1763 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1764 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1765 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1766 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1767 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1768 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1769 }
1770
1771 return ret;
1772}
1773
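/*
 * Editor's sketch (illustrative only): verify_xena_quiescence() above
 * treats the adapter as ready only when none of the READY/QUIESCENT bits
 * is missing from the status word, which it tests by inverting the value
 * and masking it with the set of required bits. The bit definitions here
 * are hypothetical stand-ins for the ADAPTER_STATUS_* flags.
 */
#define SKETCH_TDMA_READY (1ULL << 0)
#define SKETCH_RDMA_READY (1ULL << 1)
#define SKETCH_PFC_READY  (1ULL << 2)
#define SKETCH_PLL_LOCK   (1ULL << 3)
#define SKETCH_ALL_READY  (SKETCH_TDMA_READY | SKETCH_RDMA_READY | \
			   SKETCH_PFC_READY | SKETCH_PLL_LOCK)

/* Returns 1 when every required bit is set in the adapter status word. */
static int sketch_is_quiescent(unsigned long long status)
{
	return (~status & SKETCH_ALL_READY) == 0;
}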
1774/**
1775 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1776 * @sp: Pointer to device specific structure
 1777 * Description :
 1778 * New procedure to clear mac address reading problems on Alpha platforms
1779 *
1780 */
1781
20346722 1782void fix_mac_address(nic_t * sp)
1783{
1784 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1785 u64 val64;
1786 int i = 0;
1787
1788 while (fix_mac[i] != END_SIGN) {
1789 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1790 udelay(10);
1791 val64 = readq(&bar0->gpio_control);
1792 }
1793}
1794
1795/**
20346722 1796 * start_nic - Turns the device on
1da177e4 1797 * @nic : device private variable.
1798 * Description:
 1799 * This function actually turns the device on. Before this function is
 1800 * called, all registers are configured from their reset states
 1801 * and shared memory is allocated but the NIC is still quiescent. On
 1802 * calling this function, the device interrupts are cleared and the NIC is
1803 * literally switched on by writing into the adapter control register.
20346722 1804 * Return Value:
1805 * SUCCESS on success and -1 on failure.
1806 */
1807
1808static int start_nic(struct s2io_nic *nic)
1809{
1810 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1811 struct net_device *dev = nic->dev;
1812 register u64 val64 = 0;
1813 u16 interruptible;
1814 u16 subid, i;
1815 mac_info_t *mac_control;
1816 struct config_param *config;
1817
1818 mac_control = &nic->mac_control;
1819 config = &nic->config;
1820
1821 /* PRC Initialization and configuration */
1822 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1823 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1824 &bar0->prc_rxd0_n[i]);
1825
1826 val64 = readq(&bar0->prc_ctrl_n[i]);
1827 if (nic->config.bimodal)
1828 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1829#ifndef CONFIG_2BUFF_MODE
1830 val64 |= PRC_CTRL_RC_ENABLED;
1831#else
1832 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1833#endif
1834 writeq(val64, &bar0->prc_ctrl_n[i]);
1835 }
1836
1837#ifdef CONFIG_2BUFF_MODE
1838 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1839 val64 = readq(&bar0->rx_pa_cfg);
1840 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1841 writeq(val64, &bar0->rx_pa_cfg);
1842#endif
1843
20346722 1844 /*
1845 * Enabling MC-RLDRAM. After enabling the device, we timeout
1846 * for around 100ms, which is approximately the time required
1847 * for the device to be ready for operation.
1848 */
1849 val64 = readq(&bar0->mc_rldram_mrs);
1850 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1851 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1852 val64 = readq(&bar0->mc_rldram_mrs);
1853
20346722 1854 msleep(100); /* Delay by around 100 ms. */
1855
1856 /* Enabling ECC Protection. */
1857 val64 = readq(&bar0->adapter_control);
1858 val64 &= ~ADAPTER_ECC_EN;
1859 writeq(val64, &bar0->adapter_control);
1860
1861 /*
1862 * Clearing any possible Link state change interrupts that
1863 * could have popped up just before Enabling the card.
1864 */
1865 val64 = readq(&bar0->mac_rmac_err_reg);
1866 if (val64)
1867 writeq(val64, &bar0->mac_rmac_err_reg);
1868
1869 /*
1870 * Verify if the device is ready to be enabled, if so enable
1871 * it.
1872 */
1873 val64 = readq(&bar0->adapter_status);
20346722 1874 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1875 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1876 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1877 (unsigned long long) val64);
1878 return FAILURE;
1879 }
1880
1881 /* Enable select interrupts */
1882 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
5e25b9dd 1883 RX_MAC_INTR | MC_INTR;
1884 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1885
20346722 1886 /*
1da177e4 1887 * With some switches, link might be already up at this point.
1888 * Because of this weird behavior, when we enable laser,
1889 * we may not get link. We need to handle this. We cannot
1890 * figure out which switch is misbehaving. So we are forced to
1891 * make a global change.
1892 */
1893
1894 /* Enabling Laser. */
1895 val64 = readq(&bar0->adapter_control);
1896 val64 |= ADAPTER_EOI_TX_ON;
1897 writeq(val64, &bar0->adapter_control);
1898
1899 /* SXE-002: Initialize link and activity LED */
1900 subid = nic->pdev->subsystem_device;
1901 if (((subid & 0xFF) >= 0x07) &&
1902 (nic->device_type == XFRAME_I_DEVICE)) {
1903 val64 = readq(&bar0->gpio_control);
1904 val64 |= 0x0000800000000000ULL;
1905 writeq(val64, &bar0->gpio_control);
1906 val64 = 0x0411040400000000ULL;
20346722 1907 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1908 }
1909
1910 /*
1911 * Don't see link state interrupts on certain switches, so
1912 * directly scheduling a link state task from here.
1913 */
1914 schedule_work(&nic->set_link_task);
1915
1916 return SUCCESS;
1917}
1918
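/*
 * Editor's sketch (illustrative only): when config.bimodal is set,
 * start_nic() above ORs PRC_CTRL_BIMODAL_INTERRUPT into each ring's PRC
 * control word before setting the ring-enable bit. The bit positions
 * below are assumptions; the real values live in s2io-regs.h.
 */
#define SKETCH_PRC_RC_ENABLED        (1ULL << 56)
#define SKETCH_PRC_BIMODAL_INTERRUPT (1ULL << 37)

static unsigned long long sketch_prc_ctrl(unsigned long long cur, int bimodal)
{
	if (bimodal)
		cur |= SKETCH_PRC_BIMODAL_INTERRUPT; /* per-ring bimodal intrs */
	return cur | SKETCH_PRC_RC_ENABLED;          /* switch the ring on     */
}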
1919/**
1920 * free_tx_buffers - Free all queued Tx buffers
1da177e4 1921 * @nic : device private variable.
20346722 1922 * Description:
1da177e4 1923 * Free all queued Tx buffers.
20346722 1924 * Return Value: void
1925*/
1926
1927static void free_tx_buffers(struct s2io_nic *nic)
1928{
1929 struct net_device *dev = nic->dev;
1930 struct sk_buff *skb;
1931 TxD_t *txdp;
 1932 int i, j, k;
1933 mac_info_t *mac_control;
1934 struct config_param *config;
1ddc50d4 1935 int cnt = 0, frg_cnt;
1936
1937 mac_control = &nic->mac_control;
1938 config = &nic->config;
1939
1940 for (i = 0; i < config->tx_fifo_num; i++) {
1941 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 1942 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1943 list_virt_addr;
1944 skb =
1945 (struct sk_buff *) ((unsigned long) txdp->
1946 Host_Control);
1947 if (skb == NULL) {
1948 memset(txdp, 0, sizeof(TxD_t) *
1949 config->max_txds);
1950 continue;
1951 }
1952 frg_cnt = skb_shinfo(skb)->nr_frags;
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1954 txdp->Buffer_Pointer,
1955 skb->len - skb->data_len,
1956 PCI_DMA_TODEVICE);
 1957 if (frg_cnt) {
 1958 TxD_t *temp;
 1959 temp = txdp;
 1960 txdp++;
 1961 for (k = 0; k < frg_cnt; k++, txdp++) {
 1962 skb_frag_t *frag =
 1963 &skb_shinfo(skb)->frags[k];
 1964 pci_unmap_page(nic->pdev,
 1965 (dma_addr_t)
 1966 txdp->
 1967 Buffer_Pointer,
 1968 frag->size,
 1969 PCI_DMA_TODEVICE);
 1970 }
 1971 txdp = temp;
 1972 }
1da177e4 1973 dev_kfree_skb(skb);
1ddc50d4 1974 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1975 cnt++;
1976 }
1977 DBG_PRINT(INTR_DBG,
1978 "%s:forcibly freeing %d skbs on FIFO%d\n",
1979 dev->name, cnt, i);
1980 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1981 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1982 }
1983}
1984
1985/**
1986 * stop_nic - To stop the nic
 1987 * @nic : device private variable.
 1988 * Description:
1989 * This function does exactly the opposite of what the start_nic()
1990 * function does. This function is called to stop the device.
1991 * Return Value:
1992 * void.
1993 */
1994
1995static void stop_nic(struct s2io_nic *nic)
1996{
1997 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1998 register u64 val64 = 0;
1999 u16 interruptible, i;
2000 mac_info_t *mac_control;
2001 struct config_param *config;
2002
2003 mac_control = &nic->mac_control;
2004 config = &nic->config;
2005
2006 /* Disable all interrupts */
2007 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
5e25b9dd 2008 RX_MAC_INTR | MC_INTR;
2009 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2010
2011 /* Disable PRCs */
2012 for (i = 0; i < config->rx_ring_num; i++) {
2013 val64 = readq(&bar0->prc_ctrl_n[i]);
2014 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2015 writeq(val64, &bar0->prc_ctrl_n[i]);
2016 }
2017}
2018
2019/**
2020 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2021 * @nic: device private variable
2022 * @ring_no: ring number
2023 * Description:
2024 * The function allocates Rx side skbs and puts the physical
2025 * address of these buffers into the RxD buffer pointers, so that the NIC
2026 * can DMA the received frame into these locations.
2027 * The NIC supports 3 receive modes, viz
2028 * 1. single buffer,
2029 * 2. three buffer and
2030 * 3. Five buffer modes.
2031 * Each mode defines how many fragments the received frame will be split
2032 * up into by the NIC. The frame is split into L3 header, L4 Header,
2033 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2034 * is split into 3 fragments. As of now only single buffer mode is
2035 * supported.
2036 * Return Value:
2037 * SUCCESS on success or an appropriate -ve value on failure.
2038 */
2039
20346722 2040int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2041{
2042 struct net_device *dev = nic->dev;
2043 struct sk_buff *skb;
2044 RxD_t *rxdp;
2045 int off, off1, size, block_no, block_no1;
2046 int offset, offset1;
2047 u32 alloc_tab = 0;
20346722 2048 u32 alloc_cnt;
2049 mac_info_t *mac_control;
2050 struct config_param *config;
2051#ifdef CONFIG_2BUFF_MODE
2052 RxD_t *rxdpnext;
2053 int nextblk;
20346722 2054 u64 tmp;
2055 buffAdd_t *ba;
2056 dma_addr_t rxdpphys;
2057#endif
2058#ifndef CONFIG_S2IO_NAPI
2059 unsigned long flags;
2060#endif
2061
2062 mac_control = &nic->mac_control;
2063 config = &nic->config;
2064 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2065 atomic_read(&nic->rx_bufs_left[ring_no]);
2066 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2067 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2068
2069 while (alloc_tab < alloc_cnt) {
20346722 2070 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2071 block_index;
20346722 2072 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1da177e4 2073 block_index;
2074 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2075 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2076#ifndef CONFIG_2BUFF_MODE
2077 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2078 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2079#else
2080 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2081 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2082#endif
2083
20346722 2084 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2085 block_virt_addr + off;
2086 if ((offset == offset1) && (rxdp->Host_Control)) {
2087 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2088 DBG_PRINT(INTR_DBG, " info equated\n");
2089 goto end;
2090 }
2091#ifndef CONFIG_2BUFF_MODE
2092 if (rxdp->Control_1 == END_OF_BLOCK) {
20346722 2093 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2094 block_index++;
2095 mac_control->rings[ring_no].rx_curr_put_info.
2096 block_index %= mac_control->rings[ring_no].block_count;
2097 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2098 block_index;
2099 off++;
2100 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2101 mac_control->rings[ring_no].rx_curr_put_info.offset =
2102 off;
2103 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2104 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2105 dev->name, rxdp);
2106 }
2107#ifndef CONFIG_S2IO_NAPI
2108 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2109 mac_control->rings[ring_no].put_pos =
2110 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2111 spin_unlock_irqrestore(&nic->put_lock, flags);
2112#endif
2113#else
2114 if (rxdp->Host_Control == END_OF_BLOCK) {
20346722 2115 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2116 block_index++;
2117 mac_control->rings[ring_no].rx_curr_put_info.block_index
2118 %= mac_control->rings[ring_no].block_count;
2119 block_no = mac_control->rings[ring_no].rx_curr_put_info
2120 .block_index;
2121 off = 0;
2122 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2123 dev->name, block_no,
2124 (unsigned long long) rxdp->Control_1);
20346722 2125 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4 2126 off;
20346722 2127 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2128 block_virt_addr;
2129 }
2130#ifndef CONFIG_S2IO_NAPI
2131 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2132 mac_control->rings[ring_no].put_pos = (block_no *
2133 (MAX_RXDS_PER_BLOCK + 1)) + off;
2134 spin_unlock_irqrestore(&nic->put_lock, flags);
2135#endif
2136#endif
2137
2138#ifndef CONFIG_2BUFF_MODE
2139 if (rxdp->Control_1 & RXD_OWN_XENA)
2140#else
2141 if (rxdp->Control_2 & BIT(0))
2142#endif
2143 {
20346722 2144 mac_control->rings[ring_no].rx_curr_put_info.
2145 offset = off;
2146 goto end;
2147 }
2148#ifdef CONFIG_2BUFF_MODE
 2149 /*
 2150 * RxDs spanning cache lines will be replenished only
 2151 * if the succeeding RxD is also owned by the Host. It
 2152 * will always be the ((8*i)+3) and ((8*i)+6)
 2153 * descriptors for the 48 byte descriptor. The offending
 2154 * descriptor is of course the 3rd descriptor.
 2155 */
20346722 2156 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2157 block_dma_addr + (off * sizeof(RxD_t));
2158 if (((u64) (rxdpphys)) % 128 > 80) {
20346722 2159 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2160 block_virt_addr + (off + 1);
2161 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2162 nextblk = (block_no + 1) %
2163 (mac_control->rings[ring_no].block_count);
2164 rxdpnext = mac_control->rings[ring_no].rx_blocks
2165 [nextblk].block_virt_addr;
2166 }
2167 if (rxdpnext->Control_2 & BIT(0))
2168 goto end;
2169 }
2170#endif
2171
2172#ifndef CONFIG_2BUFF_MODE
2173 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2174#else
2175 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2176#endif
2177 if (!skb) {
2178 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2179 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2180 return -ENOMEM;
2181 }
2182#ifndef CONFIG_2BUFF_MODE
2183 skb_reserve(skb, NET_IP_ALIGN);
2184 memset(rxdp, 0, sizeof(RxD_t));
2185 rxdp->Buffer0_ptr = pci_map_single
2186 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2187 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2188 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2189 rxdp->Host_Control = (unsigned long) (skb);
2190 rxdp->Control_1 |= RXD_OWN_XENA;
2191 off++;
2192 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2193 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2194#else
20346722 2195 ba = &mac_control->rings[ring_no].ba[block_no][off];
1da177e4 2196 skb_reserve(skb, BUF0_LEN);
2197 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2198 if (tmp)
2199 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2200
2201 memset(rxdp, 0, sizeof(RxD_t));
2202 rxdp->Buffer2_ptr = pci_map_single
2203 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2204 PCI_DMA_FROMDEVICE);
2205 rxdp->Buffer0_ptr =
2206 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2207 PCI_DMA_FROMDEVICE);
2208 rxdp->Buffer1_ptr =
2209 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2210 PCI_DMA_FROMDEVICE);
2211
2212 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2213 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2214 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2215 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2216 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2217 rxdp->Control_1 |= RXD_OWN_XENA;
2218 off++;
20346722 2219 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2220#endif
5e25b9dd 2221 rxdp->Control_2 |= SET_RXD_MARKER;
20346722 2222
2223 atomic_inc(&nic->rx_bufs_left[ring_no]);
2224 alloc_tab++;
2225 }
2226
2227 end:
2228 return SUCCESS;
2229}
2230
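/*
 * Editor's sketch (illustrative only): fill_rx_buffers() above tracks a
 * (block_index, offset) pair per ring and flattens it into a single
 * linear descriptor position with block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
 * the "+ 1" accounts for the end-of-block link descriptor. The block size
 * used here is an assumed value, not the driver's constant.
 */
#define SKETCH_RXDS_PER_BLOCK 127   /* assumed MAX_RXDS_PER_BLOCK */

static int sketch_rxd_position(int block_no, int off)
{
	return block_no * (SKETCH_RXDS_PER_BLOCK + 1) + off;
}

/* Advance an offset within a block, wrapping past the end-of-block RxD. */
static int sketch_next_rxd_off(int off)
{
	return (off + 1) % (SKETCH_RXDS_PER_BLOCK + 1);
}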
2231/**
20346722 2232 * free_rx_buffers - Frees all Rx buffers
1da177e4 2233 * @sp: device private variable.
20346722 2234 * Description:
2235 * This function will free all Rx buffers allocated by host.
2236 * Return Value:
2237 * NONE.
2238 */
2239
2240static void free_rx_buffers(struct s2io_nic *sp)
2241{
2242 struct net_device *dev = sp->dev;
2243 int i, j, blk = 0, off, buf_cnt = 0;
2244 RxD_t *rxdp;
2245 struct sk_buff *skb;
2246 mac_info_t *mac_control;
2247 struct config_param *config;
2248#ifdef CONFIG_2BUFF_MODE
2249 buffAdd_t *ba;
2250#endif
2251
2252 mac_control = &sp->mac_control;
2253 config = &sp->config;
2254
2255 for (i = 0; i < config->rx_ring_num; i++) {
2256 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2257 off = j % (MAX_RXDS_PER_BLOCK + 1);
2258 rxdp = mac_control->rings[i].rx_blocks[blk].
2259 block_virt_addr + off;
2260
2261#ifndef CONFIG_2BUFF_MODE
2262 if (rxdp->Control_1 == END_OF_BLOCK) {
2263 rxdp =
2264 (RxD_t *) ((unsigned long) rxdp->
2265 Control_2);
2266 j++;
2267 blk++;
2268 }
2269#else
2270 if (rxdp->Host_Control == END_OF_BLOCK) {
2271 blk++;
2272 continue;
2273 }
2274#endif
2275
2276 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2277 memset(rxdp, 0, sizeof(RxD_t));
2278 continue;
2279 }
2280
2281 skb =
2282 (struct sk_buff *) ((unsigned long) rxdp->
2283 Host_Control);
2284 if (skb) {
2285#ifndef CONFIG_2BUFF_MODE
2286 pci_unmap_single(sp->pdev, (dma_addr_t)
2287 rxdp->Buffer0_ptr,
2288 dev->mtu +
2289 HEADER_ETHERNET_II_802_3_SIZE
2290 + HEADER_802_2_SIZE +
2291 HEADER_SNAP_SIZE,
2292 PCI_DMA_FROMDEVICE);
2293#else
20346722 2294 ba = &mac_control->rings[i].ba[blk][off];
2295 pci_unmap_single(sp->pdev, (dma_addr_t)
2296 rxdp->Buffer0_ptr,
2297 BUF0_LEN,
2298 PCI_DMA_FROMDEVICE);
2299 pci_unmap_single(sp->pdev, (dma_addr_t)
2300 rxdp->Buffer1_ptr,
2301 BUF1_LEN,
2302 PCI_DMA_FROMDEVICE);
2303 pci_unmap_single(sp->pdev, (dma_addr_t)
2304 rxdp->Buffer2_ptr,
2305 dev->mtu + BUF0_LEN + 4,
2306 PCI_DMA_FROMDEVICE);
2307#endif
2308 dev_kfree_skb(skb);
2309 atomic_dec(&sp->rx_bufs_left[i]);
2310 buf_cnt++;
2311 }
2312 memset(rxdp, 0, sizeof(RxD_t));
2313 }
2314 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2315 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2316 mac_control->rings[i].rx_curr_put_info.offset = 0;
2317 mac_control->rings[i].rx_curr_get_info.offset = 0;
2318 atomic_set(&sp->rx_bufs_left[i], 0);
2319 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2320 dev->name, buf_cnt, i);
2321 }
2322}
2323
2324/**
2325 * s2io_poll - Rx interrupt handler for NAPI support
2326 * @dev : pointer to the device structure.
20346722 2327 * @budget : The number of packets that were budgeted to be processed
 2328 * during one pass through the 'Poll' function.
 2329 * Description:
 2330 * Comes into picture only if NAPI support has been incorporated. It does
 2331 * the same thing that rx_intr_handler does, but not in an interrupt
 2332 * context; also, it will process only a given number of packets.
2333 * Return value:
2334 * 0 on success and 1 if there are No Rx packets to be processed.
2335 */
2336
20346722 2337#if defined(CONFIG_S2IO_NAPI)
2338static int s2io_poll(struct net_device *dev, int *budget)
2339{
2340 nic_t *nic = dev->priv;
20346722 2341 int pkt_cnt = 0, org_pkts_to_process;
2342 mac_info_t *mac_control;
2343 struct config_param *config;
2344 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2345 u64 val64;
2346 int i;
1da177e4 2347
7ba013ac 2348 atomic_inc(&nic->isr_cnt);
2349 mac_control = &nic->mac_control;
2350 config = &nic->config;
2351
2352 nic->pkts_to_process = *budget;
2353 if (nic->pkts_to_process > dev->quota)
2354 nic->pkts_to_process = dev->quota;
2355 org_pkts_to_process = nic->pkts_to_process;
2356
2357 val64 = readq(&bar0->rx_traffic_int);
2358 writeq(val64, &bar0->rx_traffic_int);
2359
2360 for (i = 0; i < config->rx_ring_num; i++) {
2361 rx_intr_handler(&mac_control->rings[i]);
2362 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2363 if (!nic->pkts_to_process) {
2364 /* Quota for the current iteration has been met */
2365 goto no_rx;
1da177e4 2366 }
2367 }
2368 if (!pkt_cnt)
2369 pkt_cnt = 1;
2370
2371 dev->quota -= pkt_cnt;
2372 *budget -= pkt_cnt;
2373 netif_rx_complete(dev);
2374
2375 for (i = 0; i < config->rx_ring_num; i++) {
2376 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2377 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2378 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2379 break;
2380 }
2381 }
2382 /* Re enable the Rx interrupts. */
2383 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
7ba013ac 2384 atomic_dec(&nic->isr_cnt);
2385 return 0;
2386
20346722 2387no_rx:
2388 dev->quota -= pkt_cnt;
2389 *budget -= pkt_cnt;
2390
2391 for (i = 0; i < config->rx_ring_num; i++) {
2392 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2393 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2394 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2395 break;
2396 }
2397 }
7ba013ac 2398 atomic_dec(&nic->isr_cnt);
2399 return 1;
2400}
2401#endif
2402
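/*
 * Editor's sketch (illustrative only): s2io_poll() above clamps the work
 * it will do to min(*budget, dev->quota), lets the Rx handler consume
 * from that allowance, then charges the packets actually processed back
 * to both counters. Names and the return convention below mirror that
 * bookkeeping but are not the driver's.
 */
static int sketch_poll_accounting(int *budget, int *quota, int rx_done)
{
	int allowance = (*budget < *quota) ? *budget : *quota;
	int done = (rx_done < allowance) ? rx_done : allowance;

	if (!done)
		done = 1;   /* mirrors "if (!pkt_cnt) pkt_cnt = 1" above */

	*quota  -= done;
	*budget -= done;

	/* 0: allowance not exhausted (poll complete); 1: more work pending */
	return (rx_done >= allowance) ? 1 : 0;
}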
2403/**
2404 * rx_intr_handler - Rx interrupt handler
2405 * @nic: device private variable.
2406 * Description:
2407 * If the interrupt is because of a received frame or if the
1da177e4 2408 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2409 * called. It picks out the RxD at which place the last Rx processing had
2410 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2411 * the offset.
2412 * Return Value:
2413 * NONE.
2414 */
20346722 2415static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2416{
20346722 2417 nic_t *nic = ring_data->nic;
1da177e4 2418 struct net_device *dev = (struct net_device *) nic->dev;
20346722 2419 int get_block, get_offset, put_block, put_offset, ring_bufs;
2420 rx_curr_get_info_t get_info, put_info;
2421 RxD_t *rxdp;
2422 struct sk_buff *skb;
2423#ifndef CONFIG_S2IO_NAPI
2424 int pkt_cnt = 0;
1da177e4 2425#endif
2426 spin_lock(&nic->rx_lock);
2427 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2428 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2429 __FUNCTION__, dev->name);
 2430 spin_unlock(&nic->rx_lock);
 2431 return;
 2432 }
2433 get_info = ring_data->rx_curr_get_info;
2434 get_block = get_info.block_index;
2435 put_info = ring_data->rx_curr_put_info;
2436 put_block = put_info.block_index;
2437 ring_bufs = get_info.ring_len+1;
2438 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1da177e4 2439 get_info.offset;
2440 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2441 get_info.offset;
2442#ifndef CONFIG_S2IO_NAPI
2443 spin_lock(&nic->put_lock);
2444 put_offset = ring_data->put_pos;
2445 spin_unlock(&nic->put_lock);
2446#else
2447 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2448 put_info.offset;
2449#endif
2450 while (RXD_IS_UP2DT(rxdp) &&
2451 (((get_offset + 1) % ring_bufs) != put_offset)) {
2452 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2453 if (skb == NULL) {
2454 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2455 dev->name);
2456 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2457 spin_unlock(&nic->rx_lock);
20346722 2458 return;
1da177e4 2459 }
2460#ifndef CONFIG_2BUFF_MODE
2461 pci_unmap_single(nic->pdev, (dma_addr_t)
2462 rxdp->Buffer0_ptr,
2463 dev->mtu +
2464 HEADER_ETHERNET_II_802_3_SIZE +
2465 HEADER_802_2_SIZE +
2466 HEADER_SNAP_SIZE,
2467 PCI_DMA_FROMDEVICE);
1da177e4 2468#else
2469 pci_unmap_single(nic->pdev, (dma_addr_t)
2470 rxdp->Buffer0_ptr,
2471 BUF0_LEN, PCI_DMA_FROMDEVICE);
2472 pci_unmap_single(nic->pdev, (dma_addr_t)
2473 rxdp->Buffer1_ptr,
2474 BUF1_LEN, PCI_DMA_FROMDEVICE);
2475 pci_unmap_single(nic->pdev, (dma_addr_t)
2476 rxdp->Buffer2_ptr,
2477 dev->mtu + BUF0_LEN + 4,
2478 PCI_DMA_FROMDEVICE);
2479#endif
2480 rx_osm_handler(ring_data, rxdp);
2481 get_info.offset++;
2482 ring_data->rx_curr_get_info.offset =
1da177e4 2483 get_info.offset;
2484 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2485 get_info.offset;
2486 if (get_info.offset &&
2487 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2488 get_info.offset = 0;
2489 ring_data->rx_curr_get_info.offset
2490 = get_info.offset;
2491 get_block++;
2492 get_block %= ring_data->block_count;
2493 ring_data->rx_curr_get_info.block_index
2494 = get_block;
2495 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2496 }
1da177e4 2497
20346722 2498 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1da177e4 2499 get_info.offset;
2500#ifdef CONFIG_S2IO_NAPI
2501 nic->pkts_to_process -= 1;
2502 if (!nic->pkts_to_process)
2503 break;
2504#else
2505 pkt_cnt++;
2506 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2507 break;
20346722 2508#endif
1da177e4 2509 }
7ba013ac 2510 spin_unlock(&nic->rx_lock);
1da177e4 2511}
2512
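/*
 * Editor's sketch (illustrative only): the while() condition in
 * rx_intr_handler() above keeps consuming descriptors until advancing the
 * "get" position by one (modulo the ring size) would land on the "put"
 * position, i.e. until the ring is drained.
 */
static int sketch_rxds_pending(int get_offset, int put_offset, int ring_bufs)
{
	return ((get_offset + 1) % ring_bufs) != put_offset;
}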
2513/**
2514 * tx_intr_handler - Transmit interrupt handler
2515 * @nic : device private variable
2516 * Description:
2517 * If an interrupt was raised to indicate DMA complete of the
2518 * Tx packet, this function is called. It identifies the last TxD
 2519 * whose buffer was freed and frees all skbs whose data have already
 2520 * been DMA'ed into the NIC's internal memory.
2521 * Return Value:
2522 * NONE
2523 */
2524
20346722 2525static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2526{
20346722 2527 nic_t *nic = fifo_data->nic;
2528 struct net_device *dev = (struct net_device *) nic->dev;
2529 tx_curr_get_info_t get_info, put_info;
2530 struct sk_buff *skb;
2531 TxD_t *txdlp;
1da177e4 2532 u16 j, frg_cnt;
1da177e4 2533
2534 get_info = fifo_data->tx_curr_get_info;
2535 put_info = fifo_data->tx_curr_put_info;
2536 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2537 list_virt_addr;
2538 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2539 (get_info.offset != put_info.offset) &&
2540 (txdlp->Host_Control)) {
2541 /* Check for TxD errors */
2542 if (txdlp->Control_1 & TXD_T_CODE) {
2543 unsigned long long err;
2544 err = txdlp->Control_1 & TXD_T_CODE;
2545 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2546 err);
2547 }
1da177e4 2548
2549 skb = (struct sk_buff *) ((unsigned long)
2550 txdlp->Host_Control);
2551 if (skb == NULL) {
2552 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2553 __FUNCTION__);
2554 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2555 return;
2556 }
2557
2558 frg_cnt = skb_shinfo(skb)->nr_frags;
2559 nic->tx_pkt_count++;
2560
2561 pci_unmap_single(nic->pdev, (dma_addr_t)
2562 txdlp->Buffer_Pointer,
2563 skb->len - skb->data_len,
2564 PCI_DMA_TODEVICE);
2565 if (frg_cnt) {
2566 TxD_t *temp;
2567 temp = txdlp;
2568 txdlp++;
2569 for (j = 0; j < frg_cnt; j++, txdlp++) {
2570 skb_frag_t *frag =
2571 &skb_shinfo(skb)->frags[j];
2572 pci_unmap_page(nic->pdev,
2573 (dma_addr_t)
2574 txdlp->
2575 Buffer_Pointer,
2576 frag->size,
2577 PCI_DMA_TODEVICE);
1da177e4 2578 }
20346722 2579 txdlp = temp;
1da177e4 2580 }
2581 memset(txdlp, 0,
2582 (sizeof(TxD_t) * fifo_data->max_txds));
2583
2584 /* Updating the statistics block */
2585 nic->stats.tx_bytes += skb->len;
2586 dev_kfree_skb_irq(skb);
2587
2588 get_info.offset++;
2589 get_info.offset %= get_info.fifo_len + 1;
2590 txdlp = (TxD_t *) fifo_data->list_info
2591 [get_info.offset].list_virt_addr;
2592 fifo_data->tx_curr_get_info.offset =
2593 get_info.offset;
2594 }
2595
2596 spin_lock(&nic->tx_lock);
2597 if (netif_queue_stopped(dev))
2598 netif_wake_queue(dev);
2599 spin_unlock(&nic->tx_lock);
2600}
2601
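/*
 * Editor's sketch (illustrative only): the while() condition in
 * tx_intr_handler() above reclaims a TxD only when the NIC no longer owns
 * it, the get pointer has not caught up with put, and an skb is actually
 * attached. The descriptor layout and ownership bit below are hypothetical.
 */
#define SKETCH_TXD_OWN_NIC (1ULL << 7)

struct sketch_txd {
	unsigned long long control_1;
	unsigned long long host_control;   /* stashed skb pointer */
};

static int sketch_txd_reclaimable(const struct sketch_txd *txd,
				  int get_off, int put_off)
{
	return !(txd->control_1 & SKETCH_TXD_OWN_NIC) &&
	       (get_off != put_off) &&
	       txd->host_control != 0;
}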
20346722 2602/**
 2603 * alarm_intr_handler - Alarm Interrupt handler
 2604 * @nic: device private variable
 2605 * Description: If the interrupt was neither because of an Rx packet nor a Tx
 2606 * complete, this function is called. If the interrupt was to indicate
 2607 * a loss of link, the OSM link status handler is invoked; for any other
 2608 * alarm interrupt, the block that raised the interrupt is displayed
 2609 * and a H/W reset is issued.
2610 * Return Value:
2611 * NONE
2612*/
2613
2614static void alarm_intr_handler(struct s2io_nic *nic)
2615{
2616 struct net_device *dev = (struct net_device *) nic->dev;
2617 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2618 register u64 val64 = 0, err_reg = 0;
2619
2620 /* Handling link status change error Intr */
2621 err_reg = readq(&bar0->mac_rmac_err_reg);
2622 writeq(err_reg, &bar0->mac_rmac_err_reg);
2623 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2624 schedule_work(&nic->set_link_task);
2625 }
2626
2627 /* Handling Ecc errors */
2628 val64 = readq(&bar0->mc_err_reg);
2629 writeq(val64, &bar0->mc_err_reg);
2630 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2631 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2632 nic->mac_control.stats_info->sw_stat.
2633 double_ecc_errs++;
2634 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2635 dev->name);
2636 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2637 netif_stop_queue(dev);
2638 schedule_work(&nic->rst_timer_task);
2639 } else {
2640 nic->mac_control.stats_info->sw_stat.
2641 single_ecc_errs++;
2642 }
2643 }
2644
2645 /* In case of a serious error, the device will be Reset. */
2646 val64 = readq(&bar0->serr_source);
2647 if (val64 & SERR_SOURCE_ANY) {
2648 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2649 DBG_PRINT(ERR_DBG, "serious error!!\n");
2650 netif_stop_queue(dev);
2651 schedule_work(&nic->rst_timer_task);
2652 }
2653
2654 /*
2655 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2656 * Error occurs, the adapter will be recycled by disabling the
20346722 2657 * adapter enable bit and enabling it again after the device
2658 * becomes Quiescent.
2659 */
2660 val64 = readq(&bar0->pcc_err_reg);
2661 writeq(val64, &bar0->pcc_err_reg);
2662 if (val64 & PCC_FB_ECC_DB_ERR) {
2663 u64 ac = readq(&bar0->adapter_control);
2664 ac &= ~(ADAPTER_CNTL_EN);
2665 writeq(ac, &bar0->adapter_control);
2666 ac = readq(&bar0->adapter_control);
2667 schedule_work(&nic->set_link_task);
2668 }
2669
2670 /* Other type of interrupts are not being handled now, TODO */
2671}
2672
20346722 2673/**
1da177e4 2674 * wait_for_cmd_complete - waits for a command to complete.
20346722 2675 * @sp : private member of the device structure, which is a pointer to the
1da177e4 2676 * s2io_nic structure.
 2677 * Description: Function that waits for a command written into the RMAC
 2678 * ADDR DATA registers to complete and returns either success or
 2679 * error depending on whether the command completed or not.
 2680 * Return value:
2681 * SUCCESS on success and FAILURE on failure.
2682 */
2683
20346722 2684int wait_for_cmd_complete(nic_t * sp)
2685{
2686 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2687 int ret = FAILURE, cnt = 0;
2688 u64 val64;
2689
2690 while (TRUE) {
2691 val64 = readq(&bar0->rmac_addr_cmd_mem);
2692 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2693 ret = SUCCESS;
2694 break;
2695 }
2696 msleep(50);
2697 if (cnt++ > 10)
2698 break;
2699 }
2700
2701 return ret;
2702}
2703
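/*
 * Editor's sketch (illustrative only): wait_for_cmd_complete() above is a
 * bounded poll -- check the strobe bit, sleep ~50 ms, give up after about
 * ten tries. A generic user-space rendering of that idiom follows; the
 * callback, interval and retry limit are all assumptions.
 */
#include <unistd.h>

static int sketch_poll_until(int (*done)(void), unsigned int poll_us, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (done())
			return 0;      /* SUCCESS */
		usleep(poll_us);
	}
	return -1;                     /* timed out: FAILURE */
}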
2704/**
2705 * s2io_reset - Resets the card.
2706 * @sp : private member of the device structure.
2707 * Description: Function to Reset the card. This function then also
20346722 2708 * restores the previously saved PCI configuration space registers as
2709 * the card reset also resets the configuration space.
2710 * Return value:
2711 * void.
2712 */
2713
20346722 2714void s2io_reset(nic_t * sp)
2715{
2716 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2717 u64 val64;
5e25b9dd 2718 u16 subid, pci_cmd;
2719
2720 val64 = SW_RESET_ALL;
2721 writeq(val64, &bar0->sw_reset);
2722
2723 /*
2724 * At this stage, if the PCI write is indeed completed, the
2725 * card is reset and so is the PCI Config space of the device.
2726 * So a read cannot be issued at this stage on any of the
2727 * registers to ensure the write into "sw_reset" register
2728 * has gone through.
2729 * Question: Is there any system call that will explicitly force
2730 * all the write commands still pending on the bus to be pushed
2731 * through?
 2732 * As of now I am just giving a 250ms delay and hoping that the
2733 * PCI write to sw_reset register is done by this time.
2734 */
2735 msleep(250);
2736
541ae68f 2737 if (!(sp->device_type & XFRAME_II_DEVICE)) {
 2738 /* Restore the PCI state saved during initialization. */
2739 pci_restore_state(sp->pdev);
2740 } else {
2741 pci_set_master(sp->pdev);
2742 }
2743 s2io_init_pci(sp);
2744
2745 msleep(250);
2746
2747 /* Set swapper to enable I/O register access */
2748 s2io_set_swapper(sp);
2749
2750 /* Clear certain PCI/PCI-X fields after reset */
2751 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2752 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2753 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2754
2755 val64 = readq(&bar0->txpic_int_reg);
2756 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2757 writeq(val64, &bar0->txpic_int_reg);
2758
2759 /* Clearing PCIX Ecc status register */
2760 pci_write_config_dword(sp->pdev, 0x68, 0);
2761
2762 /* Reset device statistics maintained by OS */
2763 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2764
2765 /* SXE-002: Configure link and activity LED to turn it off */
2766 subid = sp->pdev->subsystem_device;
2767 if (((subid & 0xFF) >= 0x07) &&
2768 (sp->device_type == XFRAME_I_DEVICE)) {
2769 val64 = readq(&bar0->gpio_control);
2770 val64 |= 0x0000800000000000ULL;
2771 writeq(val64, &bar0->gpio_control);
2772 val64 = 0x0411040400000000ULL;
20346722 2773 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2774 }
2775
2776 /*
 2777 * Clear spurious ECC interrupts that would have occurred on
2778 * XFRAME II cards after reset.
2779 */
2780 if (sp->device_type == XFRAME_II_DEVICE) {
2781 val64 = readq(&bar0->pcc_err_reg);
2782 writeq(val64, &bar0->pcc_err_reg);
2783 }
2784
2785 sp->device_enabled_once = FALSE;
2786}
2787
2788/**
 2789 * s2io_set_swapper - to set the swapper control on the card
2790 * @sp : private member of the device structure,
1da177e4 2791 * pointer to the s2io_nic structure.
20346722 2792 * Description: Function to set the swapper control on the card
2793 * correctly depending on the 'endianness' of the system.
2794 * Return value:
2795 * SUCCESS on success and FAILURE on failure.
2796 */
2797
20346722 2798int s2io_set_swapper(nic_t * sp)
2799{
2800 struct net_device *dev = sp->dev;
2801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2802 u64 val64, valt, valr;
2803
20346722 2804 /*
2805 * Set proper endian settings and verify the same by reading
2806 * the PIF Feed-back register.
2807 */
2808
2809 val64 = readq(&bar0->pif_rd_swapper_fb);
2810 if (val64 != 0x0123456789ABCDEFULL) {
2811 int i = 0;
2812 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2813 0x8100008181000081ULL, /* FE=1, SE=0 */
2814 0x4200004242000042ULL, /* FE=0, SE=1 */
2815 0}; /* FE=0, SE=0 */
2816
2817 while(i<4) {
2818 writeq(value[i], &bar0->swapper_ctrl);
2819 val64 = readq(&bar0->pif_rd_swapper_fb);
2820 if (val64 == 0x0123456789ABCDEFULL)
2821 break;
2822 i++;
2823 }
2824 if (i == 4) {
2825 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2826 dev->name);
2827 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2828 (unsigned long long) val64);
2829 return FAILURE;
2830 }
2831 valr = value[i];
2832 } else {
2833 valr = readq(&bar0->swapper_ctrl);
2834 }
2835
2836 valt = 0x0123456789ABCDEFULL;
2837 writeq(valt, &bar0->xmsi_address);
2838 val64 = readq(&bar0->xmsi_address);
2839
2840 if(val64 != valt) {
2841 int i = 0;
2842 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2843 0x0081810000818100ULL, /* FE=1, SE=0 */
2844 0x0042420000424200ULL, /* FE=0, SE=1 */
2845 0}; /* FE=0, SE=0 */
2846
2847 while(i<4) {
2848 writeq((value[i] | valr), &bar0->swapper_ctrl);
2849 writeq(valt, &bar0->xmsi_address);
2850 val64 = readq(&bar0->xmsi_address);
2851 if(val64 == valt)
2852 break;
2853 i++;
2854 }
2855 if(i == 4) {
20346722 2856 unsigned long long x = val64;
1da177e4 2857 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 2858 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2859 return FAILURE;
2860 }
2861 }
2862 val64 = readq(&bar0->swapper_ctrl);
2863 val64 &= 0xFFFF000000000000ULL;
2864
2865#ifdef __BIG_ENDIAN
2866 /*
 2867 * The device is by default set to a big endian format, so a
 2868 * big endian driver need not set anything.
2869 */
2870 val64 |= (SWAPPER_CTRL_TXP_FE |
2871 SWAPPER_CTRL_TXP_SE |
2872 SWAPPER_CTRL_TXD_R_FE |
2873 SWAPPER_CTRL_TXD_W_FE |
2874 SWAPPER_CTRL_TXF_R_FE |
2875 SWAPPER_CTRL_RXD_R_FE |
2876 SWAPPER_CTRL_RXD_W_FE |
2877 SWAPPER_CTRL_RXF_W_FE |
2878 SWAPPER_CTRL_XMSI_FE |
2879 SWAPPER_CTRL_XMSI_SE |
2880 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2881 writeq(val64, &bar0->swapper_ctrl);
2882#else
20346722 2883 /*
1da177e4 2884 * Initially we enable all bits to make it accessible by the
20346722 2885 * driver, then we selectively enable only those bits that
2886 * we want to set.
2887 */
2888 val64 |= (SWAPPER_CTRL_TXP_FE |
2889 SWAPPER_CTRL_TXP_SE |
2890 SWAPPER_CTRL_TXD_R_FE |
2891 SWAPPER_CTRL_TXD_R_SE |
2892 SWAPPER_CTRL_TXD_W_FE |
2893 SWAPPER_CTRL_TXD_W_SE |
2894 SWAPPER_CTRL_TXF_R_FE |
2895 SWAPPER_CTRL_RXD_R_FE |
2896 SWAPPER_CTRL_RXD_R_SE |
2897 SWAPPER_CTRL_RXD_W_FE |
2898 SWAPPER_CTRL_RXD_W_SE |
2899 SWAPPER_CTRL_RXF_W_FE |
2900 SWAPPER_CTRL_XMSI_FE |
2901 SWAPPER_CTRL_XMSI_SE |
2902 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2903 writeq(val64, &bar0->swapper_ctrl);
2904#endif
2905 val64 = readq(&bar0->swapper_ctrl);
2906
2907 /*
2908 * Verifying if endian settings are accurate by reading a
2909 * feedback register.
2910 */
2911 val64 = readq(&bar0->pif_rd_swapper_fb);
2912 if (val64 != 0x0123456789ABCDEFULL) {
2913 /* Endian settings are incorrect, calls for another dekko. */
2914 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2915 dev->name);
2916 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2917 (unsigned long long) val64);
2918 return FAILURE;
2919 }
2920
2921 return SUCCESS;
2922}
2923
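/*
 * Editor's sketch (illustrative only): s2io_set_swapper() above decides
 * whether the current swapper setting is usable by reading the feedback
 * register and comparing it against the known pattern 0x0123456789ABCDEF;
 * anything else means the byte lanes are still swapped incorrectly.
 */
#define SKETCH_FEEDBACK_PATTERN 0x0123456789ABCDEFULL

static int sketch_swapper_ok(unsigned long long feedback)
{
	return feedback == SKETCH_FEEDBACK_PATTERN;
}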
2924/* ********************************************************* *
2925 * Functions defined below concern the OS part of the driver *
2926 * ********************************************************* */
2927
20346722 2928/**
2929 * s2io_open - open entry point of the driver
2930 * @dev : pointer to the device structure.
2931 * Description:
2932 * This function is the open entry point of the driver. It mainly calls a
2933 * function to allocate Rx buffers and inserts them into the buffer
20346722 2934 * descriptors and then enables the Rx part of the NIC.
2935 * Return value:
2936 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2937 * file on failure.
2938 */
2939
20346722 2940int s2io_open(struct net_device *dev)
2941{
2942 nic_t *sp = dev->priv;
2943 int err = 0;
2944
2945 /*
2946 * Make sure you have link off by default every time
2947 * Nic is initialized
2948 */
2949 netif_carrier_off(dev);
 2950 sp->last_link_state = 0; /* Unknown link state */
2951
2952 /* Initialize H/W and enable interrupts */
2953 if (s2io_card_up(sp)) {
2954 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2955 dev->name);
2956 err = -ENODEV;
2957 goto hw_init_failed;
2958 }
2959
2960 /* After proper initialization of H/W, register ISR */
20346722 2961 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2962 sp->name, dev);
2963 if (err) {
2964 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2965 dev->name);
20346722 2966 goto isr_registration_failed;
2967 }
2968
2969 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2970 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2971 err = -ENODEV;
2972 goto setting_mac_address_failed;
2973 }
2974
2975 netif_start_queue(dev);
2976 return 0;
2977
2978setting_mac_address_failed:
2979 free_irq(sp->pdev->irq, dev);
2980isr_registration_failed:
25fff88e 2981 del_timer_sync(&sp->alarm_timer);
2982 s2io_reset(sp);
2983hw_init_failed:
2984 return err;
2985}
2986
2987/**
2988 * s2io_close -close entry point of the driver
2989 * @dev : device pointer.
2990 * Description:
2991 * This is the stop entry point of the driver. It needs to undo exactly
 2992 * whatever was done by the open entry point, thus it's usually referred to
 2993 * as the close function. Among other things this function mainly stops the
2994 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2995 * Return value:
2996 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2997 * file on failure.
2998 */
2999
20346722 3000int s2io_close(struct net_device *dev)
3001{
3002 nic_t *sp = dev->priv;
3003 flush_scheduled_work();
3004 netif_stop_queue(dev);
3005 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3006 s2io_card_down(sp);
3007
20346722 3008 free_irq(sp->pdev->irq, dev);
3009 sp->device_close_flag = TRUE; /* Device is shut down. */
3010 return 0;
3011}
3012
3013/**
 3014 * s2io_xmit - Tx entry point of the driver
3015 * @skb : the socket buffer containing the Tx data.
3016 * @dev : device pointer.
3017 * Description :
3018 * This function is the Tx entry point of the driver. S2IO NIC supports
3019 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 3020 * NOTE: when the device can't queue the pkt, just the trans_start variable will
 3021 * not be updated.
3022 * Return value:
3023 * 0 on success & 1 on failure.
3024 */
3025
20346722 3026int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3027{
3028 nic_t *sp = dev->priv;
3029 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3030 register u64 val64;
3031 TxD_t *txdp;
3032 TxFIFO_element_t __iomem *tx_fifo;
3033 unsigned long flags;
3034#ifdef NETIF_F_TSO
3035 int mss;
3036#endif
3037 u16 vlan_tag = 0;
3038 int vlan_priority = 0;
3039 mac_info_t *mac_control;
3040 struct config_param *config;
3041
3042 mac_control = &sp->mac_control;
3043 config = &sp->config;
3044
20346722 3045 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3046 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3047 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3048 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3049 dev->name);
3050 spin_unlock_irqrestore(&sp->tx_lock, flags);
3051 dev_kfree_skb(skb);
3052 return 0;
3053 }
3054
3055 queue = 0;
1da177e4 3056
3057 /* Get Fifo number to Transmit based on vlan priority */
3058 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3059 vlan_tag = vlan_tx_tag_get(skb);
3060 vlan_priority = vlan_tag >> 13;
3061 queue = config->fifo_mapping[vlan_priority];
3062 }
3063
3064 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3065 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3066 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3067 list_virt_addr;
3068
3069 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3070 /* Avoid "put" pointer going beyond "get" pointer */
3071 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3072 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3073 netif_stop_queue(dev);
3074 dev_kfree_skb(skb);
3075 spin_unlock_irqrestore(&sp->tx_lock, flags);
3076 return 0;
3077 }
3078#ifdef NETIF_F_TSO
3079 mss = skb_shinfo(skb)->tso_size;
3080 if (mss) {
3081 txdp->Control_1 |= TXD_TCP_LSO_EN;
3082 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3083 }
3084#endif
3085
3086 frg_cnt = skb_shinfo(skb)->nr_frags;
3087 frg_len = skb->len - skb->data_len;
3088
3089 txdp->Buffer_Pointer = pci_map_single
3090 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
20346722 3091 txdp->Host_Control = (unsigned long) skb;
3092 if (skb->ip_summed == CHECKSUM_HW) {
3093 txdp->Control_2 |=
3094 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3095 TXD_TX_CKO_UDP_EN);
3096 }
3097
3098 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3099
3100 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3101 txdp->Control_2 |= TXD_VLAN_ENABLE;
3102 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3103 }
3104
3105 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3106 TXD_GATHER_CODE_FIRST);
3107 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3108
3109 /* For fragmented SKB. */
3110 for (i = 0; i < frg_cnt; i++) {
3111 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3112 txdp++;
3113 txdp->Buffer_Pointer = (u64) pci_map_page
3114 (sp->pdev, frag->page, frag->page_offset,
3115 frag->size, PCI_DMA_TODEVICE);
3116 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3117 }
3118 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3119
3120 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3121 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3122 writeq(val64, &tx_fifo->TxDL_Pointer);
3123
3124 wmb();
3125
3126 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3127 TX_FIFO_LAST_LIST);
20346722 3128
3129#ifdef NETIF_F_TSO
3130 if (mss)
3131 val64 |= TX_FIFO_SPECIAL_FUNC;
3132#endif
3133 writeq(val64, &tx_fifo->List_Control);
3134
1da177e4 3135 put_off++;
3136 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3137 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3138
3139 /* Avoid "put" pointer going beyond "get" pointer */
3140 if (((put_off + 1) % queue_len) == get_off) {
3141 DBG_PRINT(TX_DBG,
3142 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3143 put_off, get_off);
3144 netif_stop_queue(dev);
3145 }
3146
3147 dev->trans_start = jiffies;
3148 spin_unlock_irqrestore(&sp->tx_lock, flags);
3149
3150 return 0;
3151}
3152
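/*
 * Editor's sketch (illustrative only): the transmit path above derives the
 * Tx FIFO from the top three bits of the VLAN tag (the 802.1p priority)
 * via config->fifo_mapping, and refuses to queue a frame when advancing
 * the "put" pointer would collide with "get". The mapping table here is a
 * made-up example, not the driver's configuration.
 */
static const int sketch_fifo_mapping[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static int sketch_fifo_for_tag(unsigned short vlan_tag)
{
	return sketch_fifo_mapping[vlan_tag >> 13];  /* priority = top 3 bits */
}

/* FIFO is full when one more descriptor would make put catch up with get. */
static int sketch_tx_fifo_full(int put_off, int get_off, int queue_len)
{
	return ((put_off + 1) % queue_len) == get_off;
}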
3153static void
3154s2io_alarm_handle(unsigned long data)
3155{
3156 nic_t *sp = (nic_t *)data;
3157
3158 alarm_intr_handler(sp);
3159 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3160}
3161
3162/**
3163 * s2io_isr - ISR handler of the device .
3164 * @irq: the irq of the device.
3165 * @dev_id: a void pointer to the dev structure of the NIC.
3166 * @pt_regs: pointer to the registers pushed on the stack.
 3167 * Description: This function is the ISR handler of the device. It
 3168 * identifies the reason for the interrupt and calls the relevant
 3169 * service routines. As a contingency measure, this ISR allocates the
 3170 * recv buffers, if their numbers are below the panic value which is
 3171 * presently set to 25% of the original number of rcv buffers allocated.
3172 * Return value:
20346722 3173 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3174 * IRQ_NONE: will be returned if interrupt is not from our device
3175 */
3176static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3177{
3178 struct net_device *dev = (struct net_device *) dev_id;
3179 nic_t *sp = dev->priv;
3180 XENA_dev_config_t __iomem *bar0 = sp->bar0;
20346722 3181 int i;
fe113638 3182 u64 reason = 0, val64;
3183 mac_info_t *mac_control;
3184 struct config_param *config;
3185
7ba013ac 3186 atomic_inc(&sp->isr_cnt);
3187 mac_control = &sp->mac_control;
3188 config = &sp->config;
3189
20346722 3190 /*
3191 * Identify the cause for interrupt and call the appropriate
3192 * interrupt handler. Causes for the interrupt could be;
3193 * 1. Rx of packet.
3194 * 2. Tx complete.
3195 * 3. Link down.
20346722 3196 * 4. Error in any functional blocks of the NIC.
3197 */
3198 reason = readq(&bar0->general_int_status);
3199
3200 if (!reason) {
3201 /* The interrupt was not raised by Xena. */
7ba013ac 3202 atomic_dec(&sp->isr_cnt);
3203 return IRQ_NONE;
3204 }
3205
3206#ifdef CONFIG_S2IO_NAPI
3207 if (reason & GEN_INTR_RXTRAFFIC) {
3208 if (netif_rx_schedule_prep(dev)) {
3209 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3210 DISABLE_INTRS);
3211 __netif_rx_schedule(dev);
3212 }
3213 }
3214#else
3215 /* If Intr is because of Rx Traffic */
3216 if (reason & GEN_INTR_RXTRAFFIC) {
3217 /*
3218 * rx_traffic_int reg is an R1 register, writing all 1's
 3219 * will ensure that the actual interrupt causing bit gets
3220 * cleared and hence a read can be avoided.
3221 */
3222 val64 = 0xFFFFFFFFFFFFFFFFULL;
3223 writeq(val64, &bar0->rx_traffic_int);
3224 for (i = 0; i < config->rx_ring_num; i++) {
3225 rx_intr_handler(&mac_control->rings[i]);
3226 }
3227 }
3228#endif
3229
3230 /* If Intr is because of Tx Traffic */
3231 if (reason & GEN_INTR_TXTRAFFIC) {
3232 /*
3233 * tx_traffic_int reg is an R1 register, writing all 1's
 3234 * will ensure that the actual interrupt causing bit gets
3235 * cleared and hence a read can be avoided.
3236 */
3237 val64 = 0xFFFFFFFFFFFFFFFFULL;
3238 writeq(val64, &bar0->tx_traffic_int);
3239
3240 for (i = 0; i < config->tx_fifo_num; i++)
3241 tx_intr_handler(&mac_control->fifos[i]);
3242 }
3243
3244 /*
3245 * If the Rx buffer count is below the panic threshold then
3246 * reallocate the buffers from the interrupt handler itself,
3247 * else schedule a tasklet to reallocate the buffers.
3248 */
3249#ifndef CONFIG_S2IO_NAPI
3250 for (i = 0; i < config->rx_ring_num; i++) {
20346722 3251 int ret;
3252 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3253 int level = rx_buffer_level(sp, rxb_size, i);
3254
3255 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3256 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3257 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3258 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3259 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3260 dev->name);
3261 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3262 clear_bit(0, (&sp->tasklet_status));
7ba013ac 3263 atomic_dec(&sp->isr_cnt);
3264 return IRQ_HANDLED;
3265 }
3266 clear_bit(0, (&sp->tasklet_status));
3267 } else if (level == LOW) {
3268 tasklet_schedule(&sp->task);
3269 }
3270 }
3271#endif
3272
7ba013ac 3273 atomic_dec(&sp->isr_cnt);
3274 return IRQ_HANDLED;
3275}
3276
3277/**
 3278 * s2io_updt_stats - Triggers a one-shot update of the device statistics block.
3279 */
3280static void s2io_updt_stats(nic_t *sp)
3281{
3282 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3283 u64 val64;
3284 int cnt = 0;
3285
3286 if (atomic_read(&sp->card_state) == CARD_UP) {
3287 /* Apprx 30us on a 133 MHz bus */
3288 val64 = SET_UPDT_CLICKS(10) |
3289 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3290 writeq(val64, &bar0->stat_cfg);
3291 do {
3292 udelay(100);
3293 val64 = readq(&bar0->stat_cfg);
3294 if (!(val64 & BIT(0)))
3295 break;
3296 cnt++;
3297 if (cnt == 5)
3298 break; /* Updt failed */
3299 } while(1);
3300 }
3301}
3302
1da177e4 3303/**
20346722 3304 * s2io_get_stats - Updates the device statistics structure.
3305 * @dev : pointer to the device structure.
3306 * Description:
20346722 3307 * This function updates the device statistics structure in the s2io_nic
3308 * structure and returns a pointer to the same.
3309 * Return value:
3310 * pointer to the updated net_device_stats structure.
3311 */
3312
20346722 3313struct net_device_stats *s2io_get_stats(struct net_device *dev)
3314{
3315 nic_t *sp = dev->priv;
3316 mac_info_t *mac_control;
3317 struct config_param *config;
3318
20346722 3319
3320 mac_control = &sp->mac_control;
3321 config = &sp->config;
3322
3323 /* Configure Stats for immediate updt */
3324 s2io_updt_stats(sp);
3325
3326 sp->stats.tx_packets =
3327 le32_to_cpu(mac_control->stats_info->tmac_frms);
3328 sp->stats.tx_errors =
3329 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3330 sp->stats.rx_errors =
3331 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3332 sp->stats.multicast =
3333 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 3334 sp->stats.rx_length_errors =
20346722 3335 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3336
3337 return (&sp->stats);
3338}
3339
3340/**
3341 * s2io_set_multicast - entry point for multicast address enable/disable.
3342 * @dev : pointer to the device structure
3343 * Description:
 3344 * This function is a driver entry point which gets called by the kernel
 3345 * whenever multicast addresses must be enabled/disabled. This also gets
 3346 * called to set/reset promiscuous mode. Depending on the device flag, we
 3347 * determine if multicast addresses must be enabled or if promiscuous mode
 3348 * is to be disabled etc.
3349 * Return value:
3350 * void.
3351 */
3352
3353static void s2io_set_multicast(struct net_device *dev)
3354{
3355 int i, j, prev_cnt;
3356 struct dev_mc_list *mclist;
3357 nic_t *sp = dev->priv;
3358 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3359 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3360 0xfeffffffffffULL;
3361 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3362 void __iomem *add;
3363
3364 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3365 /* Enable all Multicast addresses */
3366 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3367 &bar0->rmac_addr_data0_mem);
3368 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3369 &bar0->rmac_addr_data1_mem);
3370 val64 = RMAC_ADDR_CMD_MEM_WE |
3371 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3372 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3373 writeq(val64, &bar0->rmac_addr_cmd_mem);
3374 /* Wait till command completes */
3375 wait_for_cmd_complete(sp);
3376
3377 sp->m_cast_flg = 1;
3378 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3379 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3380 /* Disable all Multicast addresses */
3381 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3382 &bar0->rmac_addr_data0_mem);
3383 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3384 &bar0->rmac_addr_data1_mem);
3385 val64 = RMAC_ADDR_CMD_MEM_WE |
3386 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3387 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3388 writeq(val64, &bar0->rmac_addr_cmd_mem);
3389 /* Wait till command completes */
3390 wait_for_cmd_complete(sp);
3391
3392 sp->m_cast_flg = 0;
3393 sp->all_multi_pos = 0;
3394 }
3395
3396 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3397 /* Put the NIC into promiscuous mode */
3398 add = &bar0->mac_cfg;
3399 val64 = readq(&bar0->mac_cfg);
3400 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3401
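	/*
	 * mac_cfg is written as two 32-bit halves; the RMAC cfg key is
	 * re-armed before each half since the register is key protected.
	 */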
3402 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3403 writel((u32) val64, add);
3404 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3405 writel((u32) (val64 >> 32), (add + 4));
3406
3407 val64 = readq(&bar0->mac_cfg);
3408 sp->promisc_flg = 1;
3409 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3410 dev->name);
3411 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3412 /* Remove the NIC from promiscuous mode */
3413 add = &bar0->mac_cfg;
3414 val64 = readq(&bar0->mac_cfg);
3415 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3416
3417 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3418 writel((u32) val64, add);
3419 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3420 writel((u32) (val64 >> 32), (add + 4));
3421
3422 val64 = readq(&bar0->mac_cfg);
3423 sp->promisc_flg = 0;
3424 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3425 dev->name);
3426 }
3427
3428 /* Update individual M_CAST address list */
3429 if ((!sp->m_cast_flg) && dev->mc_count) {
3430 if (dev->mc_count >
3431 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3432 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3433 dev->name);
3434 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3435 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3436 return;
3437 }
3438
3439 prev_cnt = sp->mc_addr_count;
3440 sp->mc_addr_count = dev->mc_count;
3441
3442 /* Clear out the previous list of Mc in the H/W. */
3443 for (i = 0; i < prev_cnt; i++) {
3444 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3445 &bar0->rmac_addr_data0_mem);
3446 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3447 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3448 val64 = RMAC_ADDR_CMD_MEM_WE |
3449 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3450 RMAC_ADDR_CMD_MEM_OFFSET
3451 (MAC_MC_ADDR_START_OFFSET + i);
3452 writeq(val64, &bar0->rmac_addr_cmd_mem);
3453
 3454			/* Wait till the command completes */
3455 if (wait_for_cmd_complete(sp)) {
3456 DBG_PRINT(ERR_DBG, "%s: Adding ",
3457 dev->name);
3458 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3459 return;
3460 }
3461 }
3462
3463 /* Create the new Rx filter list and update the same in H/W. */
3464 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3465 i++, mclist = mclist->next) {
3466 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3467 ETH_ALEN);
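			/*
			 * Pack the 6 address bytes MSB-first into the low
			 * 48 bits of mac_addr; the final right shift below
			 * undoes the extra left shift of the last pass.
			 */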
3468 for (j = 0; j < ETH_ALEN; j++) {
3469 mac_addr |= mclist->dmi_addr[j];
3470 mac_addr <<= 8;
3471 }
3472 mac_addr >>= 8;
3473 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3474 &bar0->rmac_addr_data0_mem);
3475 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3476 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3477 val64 = RMAC_ADDR_CMD_MEM_WE |
3478 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3479 RMAC_ADDR_CMD_MEM_OFFSET
3480 (i + MAC_MC_ADDR_START_OFFSET);
3481 writeq(val64, &bar0->rmac_addr_cmd_mem);
3482
 3483			/* Wait till the command completes */
3484 if (wait_for_cmd_complete(sp)) {
3485 DBG_PRINT(ERR_DBG, "%s: Adding ",
3486 dev->name);
3487 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3488 return;
3489 }
3490 }
3491 }
3492}
3493
3494/**
 3495 * s2io_set_mac_addr - Programs the Xframe MAC address
 3496 * @dev : pointer to the device structure.
 3497 * @addr: a uchar pointer to the new MAC address which is to be set.
 3498 * Description : This procedure programs the Xframe to receive
 3499 * frames with the new MAC address.
 3500 * Return value: SUCCESS on success and an appropriate (-)ve integer
 3501 * as defined in the errno.h file on failure.
3502 */
3503
3504int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3505{
3506 nic_t *sp = dev->priv;
3507 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3508 register u64 val64, mac_addr = 0;
3509 int i;
3510
20346722 3511 /*
3512 * Set the new MAC address as the new unicast filter and reflect this
3513 * change on the device address registered with the OS. It will be
20346722 3514 * at offset 0.
3515 */
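	/*
	 * Pack the 6 address bytes MSB-first into the low 48 bits of a u64,
	 * e.g. 00:01:02:03:04:05 becomes 0x000102030405.
	 */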
3516 for (i = 0; i < ETH_ALEN; i++) {
3517 mac_addr <<= 8;
3518 mac_addr |= addr[i];
3519 }
3520
3521 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3522 &bar0->rmac_addr_data0_mem);
3523
3524 val64 =
3525 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3526 RMAC_ADDR_CMD_MEM_OFFSET(0);
3527 writeq(val64, &bar0->rmac_addr_cmd_mem);
3528 /* Wait till command completes */
3529 if (wait_for_cmd_complete(sp)) {
3530 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3531 return FAILURE;
3532 }
3533
3534 return SUCCESS;
3535}
3536
3537/**
 3538 * s2io_ethtool_sset - Sets different link parameters.
 3539 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
 3540 * @info: pointer to the structure with parameters given by ethtool to set
 3541 * link information.
 3542 * Description:
 3543 * The function sets different link parameters provided by the user onto
 3544 * the NIC.
3545 * Return value:
3546 * 0 on success.
3547*/
3548
3549static int s2io_ethtool_sset(struct net_device *dev,
3550 struct ethtool_cmd *info)
3551{
3552 nic_t *sp = dev->priv;
3553 if ((info->autoneg == AUTONEG_ENABLE) ||
3554 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3555 return -EINVAL;
3556 else {
3557 s2io_close(sp->dev);
3558 s2io_open(sp->dev);
3559 }
3560
3561 return 0;
3562}
3563
3564/**
 3565 * s2io_ethtool_gset - Return link specific information.
3566 * @sp : private member of the device structure, pointer to the
3567 * s2io_nic structure.
3568 * @info : pointer to the structure with parameters given by ethtool
3569 * to return link information.
3570 * Description:
3571 * Returns link specific information like speed, duplex etc.. to ethtool.
3572 * Return value :
3573 * return 0 on success.
3574 */
3575
3576static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3577{
3578 nic_t *sp = dev->priv;
3579 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
 3580	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
3581 info->port = PORT_FIBRE;
3582 /* info->transceiver?? TODO */
3583
3584 if (netif_carrier_ok(sp->dev)) {
3585 info->speed = 10000;
3586 info->duplex = DUPLEX_FULL;
3587 } else {
3588 info->speed = -1;
3589 info->duplex = -1;
3590 }
3591
3592 info->autoneg = AUTONEG_DISABLE;
3593 return 0;
3594}
3595
3596/**
 3597 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 3598 * @sp : private member of the device structure, which is a pointer to the
 3599 * s2io_nic structure.
 3600 * @info : pointer to the structure with parameters given by ethtool to
 3601 * return driver information.
 3602 * Description:
 3603 * Returns driver specific information like name, version etc. to ethtool.
3604 * Return value:
3605 * void
3606 */
3607
3608static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3609 struct ethtool_drvinfo *info)
3610{
3611 nic_t *sp = dev->priv;
3612
3613 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3614 strncpy(info->version, s2io_driver_version,
3615 sizeof(s2io_driver_version));
3616 strncpy(info->fw_version, "", 32);
3617 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3618 info->regdump_len = XENA_REG_SPACE;
3619 info->eedump_len = XENA_EEPROM_SPACE;
3620 info->testinfo_len = S2IO_TEST_LEN;
3621 info->n_stats = S2IO_STAT_LEN;
3622}
3623
3624/**
 3625 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
 3626 * @sp: private member of the device structure, which is a pointer to the
 3627 * s2io_nic structure.
 3628 * @regs : pointer to the structure with parameters given by ethtool for
 3629 * dumping the registers.
 3630 * @reg_space: The input argument into which all the registers are dumped.
 3631 * Description:
 3632 * Dumps the entire register space of the Xframe NIC into the user given
 3633 * buffer area.
 3634 * Return value :
 3635 * void.
3636*/
3637
3638static void s2io_ethtool_gregs(struct net_device *dev,
3639 struct ethtool_regs *regs, void *space)
3640{
3641 int i;
3642 u64 reg;
3643 u8 *reg_space = (u8 *) space;
3644 nic_t *sp = dev->priv;
3645
3646 regs->len = XENA_REG_SPACE;
3647 regs->version = sp->pdev->subsystem_device;
3648
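	/* Copy the entire BAR0 register space into the user buffer, 8 bytes at a time. */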
3649 for (i = 0; i < regs->len; i += 8) {
3650 reg = readq(sp->bar0 + i);
3651 memcpy((reg_space + i), &reg, 8);
3652 }
3653}
3654
3655/**
 3656 * s2io_phy_id - timer function that alternates the adapter LED.
 3657 * @data : address of the private member of the device structure, which
 3658 * is a pointer to the s2io_nic structure, provided as an unsigned long.
 3659 * Description: This is the timer function that alternately sets and
 3660 * resets the adapter LED bit of the adapter control register on each
 3661 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 3662 * once every second.
3663*/
3664static void s2io_phy_id(unsigned long data)
3665{
3666 nic_t *sp = (nic_t *) data;
3667 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3668 u64 val64 = 0;
3669 u16 subid;
3670
3671 subid = sp->pdev->subsystem_device;
541ae68f
K
3672 if ((sp->device_type == XFRAME_II_DEVICE) ||
3673 ((subid & 0xFF) >= 0x07)) {
3674 val64 = readq(&bar0->gpio_control);
3675 val64 ^= GPIO_CTRL_GPIO_0;
3676 writeq(val64, &bar0->gpio_control);
3677 } else {
3678 val64 = readq(&bar0->adapter_control);
3679 val64 ^= ADAPTER_LED_ON;
3680 writeq(val64, &bar0->adapter_control);
3681 }
3682
3683 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3684}
3685
3686/**
3687 * s2io_ethtool_idnic - To physically identify the nic on the system.
3688 * @sp : private member of the device structure, which is a pointer to the
3689 * s2io_nic structure.
 3690 * @id : pointer to the structure with identification parameters given by
 3691 * ethtool.
 3692 * Description: Used to physically identify the NIC on the system.
 3693 * The Link LED will blink for a time specified by the user for
 3694 * identification.
 3695 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 3696 * identification is possible only if its link is up.
3697 * Return value:
3698 * int , returns 0 on success
3699 */
3700
3701static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3702{
3703 u64 val64 = 0, last_gpio_ctrl_val;
3704 nic_t *sp = dev->priv;
3705 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3706 u16 subid;
3707
3708 subid = sp->pdev->subsystem_device;
3709 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
3710 if ((sp->device_type == XFRAME_I_DEVICE) &&
3711 ((subid & 0xFF) < 0x07)) {
3712 val64 = readq(&bar0->adapter_control);
3713 if (!(val64 & ADAPTER_CNTL_EN)) {
3714 printk(KERN_ERR
3715 "Adapter Link down, cannot blink LED\n");
3716 return -EFAULT;
3717 }
3718 }
3719 if (sp->id_timer.function == NULL) {
3720 init_timer(&sp->id_timer);
3721 sp->id_timer.function = s2io_phy_id;
3722 sp->id_timer.data = (unsigned long) sp;
3723 }
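	/*
	 * Start the blink timer, sleep for the user requested time (or the
	 * default flicker time) and then stop blinking.
	 */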
3724 mod_timer(&sp->id_timer, jiffies);
3725 if (data)
20346722 3726 msleep_interruptible(data * HZ);
1da177e4 3727 else
20346722 3728 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3729 del_timer_sync(&sp->id_timer);
3730
541ae68f 3731 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3732 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3733 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3734 }
3735
3736 return 0;
3737}
3738
3739/**
 3740 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 3741 * @sp : private member of the device structure, which is a pointer to the
 3742 * s2io_nic structure.
 3743 * @ep : pointer to the structure with pause parameters given by ethtool.
3744 * Description:
3745 * Returns the Pause frame generation and reception capability of the NIC.
3746 * Return value:
3747 * void
3748 */
3749static void s2io_ethtool_getpause_data(struct net_device *dev,
3750 struct ethtool_pauseparam *ep)
3751{
3752 u64 val64;
3753 nic_t *sp = dev->priv;
3754 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3755
3756 val64 = readq(&bar0->rmac_pause_cfg);
3757 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3758 ep->tx_pause = TRUE;
3759 if (val64 & RMAC_PAUSE_RX_ENABLE)
3760 ep->rx_pause = TRUE;
3761 ep->autoneg = FALSE;
3762}
3763
3764/**
3765 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3766 * @sp : private member of the device structure, which is a pointer to the
3767 * s2io_nic structure.
3768 * @ep : pointer to the structure with pause parameters given by ethtool.
3769 * Description:
3770 * It can be used to set or reset Pause frame generation or reception
3771 * support of the NIC.
3772 * Return value:
3773 * int, returns 0 on Success
3774 */
3775
3776static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3777 struct ethtool_pauseparam *ep)
3778{
3779 u64 val64;
3780 nic_t *sp = dev->priv;
3781 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3782
3783 val64 = readq(&bar0->rmac_pause_cfg);
3784 if (ep->tx_pause)
3785 val64 |= RMAC_PAUSE_GEN_ENABLE;
3786 else
3787 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3788 if (ep->rx_pause)
3789 val64 |= RMAC_PAUSE_RX_ENABLE;
3790 else
3791 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3792 writeq(val64, &bar0->rmac_pause_cfg);
3793 return 0;
3794}
3795
3796/**
3797 * read_eeprom - reads 4 bytes of data from user given offset.
 3798 * @sp : private member of the device structure, which is a pointer to the
 3799 * s2io_nic structure.
 3800 * @off : offset at which the data must be read
 3801 * @data : output parameter where the data read at the given
 3802 * offset is stored.
 3803 * Description:
 3804 * Will read 4 bytes of data from the user given offset and return the
 3805 * read data.
 3806 * NOTE: Only the part of the EEPROM visible through the I2C bus can
 3807 * be read.
3808 * Return value:
3809 * -1 on failure and 0 on success.
3810 */
3811
3812#define S2IO_DEV_ID 5
3813static int read_eeprom(nic_t * sp, int off, u32 * data)
3814{
3815 int ret = -1;
3816 u32 exit_cnt = 0;
3817 u64 val64;
3818 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3819
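	/* Issue a single I2C read transaction; completion is polled for below. */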
3820 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3821 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3822 I2C_CONTROL_CNTL_START;
3823 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3824
3825 while (exit_cnt < 5) {
3826 val64 = readq(&bar0->i2c_control);
3827 if (I2C_CONTROL_CNTL_END(val64)) {
3828 *data = I2C_CONTROL_GET_DATA(val64);
3829 ret = 0;
3830 break;
3831 }
3832 msleep(50);
3833 exit_cnt++;
3834 }
3835
3836 return ret;
3837}
3838
3839/**
3840 * write_eeprom - actually writes the relevant part of the data value.
3841 * @sp : private member of the device structure, which is a pointer to the
3842 * s2io_nic structure.
3843 * @off : offset at which the data must be written
3844 * @data : The data that is to be written
20346722 3845 * @cnt : Number of bytes of the data that are actually to be written into
3846 * the Eeprom. (max of 3)
3847 * Description:
3848 * Actually writes the relevant part of the data value into the Eeprom
3849 * through the I2C bus.
3850 * Return value:
3851 * 0 on success, -1 on failure.
3852 */
3853
3854static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3855{
3856 int exit_cnt = 0, ret = -1;
3857 u64 val64;
3858 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3859
3860 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3861 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3862 I2C_CONTROL_CNTL_START;
3863 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3864
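	/*
	 * Poll for completion; a NACK from the device means the write was
	 * rejected (the EEPROM self-test relies on this for protected offsets).
	 */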
3865 while (exit_cnt < 5) {
3866 val64 = readq(&bar0->i2c_control);
3867 if (I2C_CONTROL_CNTL_END(val64)) {
3868 if (!(val64 & I2C_CONTROL_NACK))
3869 ret = 0;
3870 break;
3871 }
3872 msleep(50);
3873 exit_cnt++;
3874 }
3875
3876 return ret;
3877}
3878
3879/**
3880 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 3881 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
 3882 * @eeprom : pointer to the user level structure provided by ethtool,
 3883 * containing all relevant information.
 3884 * @data_buf : user supplied buffer into which the Eeprom contents are read.
 3885 * Description: Reads the values stored in the Eeprom at given offset
 3886 * for a given length. Stores these values in the input argument data
 3887 * buffer 'data_buf' and returns these to the caller (ethtool.)
3888 * Return value:
3889 * int 0 on success
3890 */
3891
3892static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 3893 struct ethtool_eeprom *eeprom, u8 * data_buf)
3894{
3895 u32 data, i, valid;
3896 nic_t *sp = dev->priv;
3897
3898 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3899
3900 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3901 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3902
3903 for (i = 0; i < eeprom->len; i += 4) {
3904 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3905 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3906 return -EFAULT;
3907 }
3908 valid = INV(data);
3909 memcpy((data_buf + i), &valid, 4);
3910 }
3911 return 0;
3912}
3913
3914/**
3915 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3916 * @sp : private member of the device structure, which is a pointer to the
3917 * s2io_nic structure.
 3918 * @eeprom : pointer to the user level structure provided by ethtool,
 3919 * containing all relevant information.
 3920 * @data_buf : user defined value to be written into Eeprom.
3921 * Description:
3922 * Tries to write the user provided value in the Eeprom, at the offset
3923 * given by the user.
3924 * Return value:
3925 * 0 on success, -EFAULT on failure.
3926 */
3927
3928static int s2io_ethtool_seeprom(struct net_device *dev,
3929 struct ethtool_eeprom *eeprom,
3930 u8 * data_buf)
3931{
3932 int len = eeprom->len, cnt = 0;
3933 u32 valid = 0, data;
3934 nic_t *sp = dev->priv;
3935
3936 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3937 DBG_PRINT(ERR_DBG,
3938 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3939 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3940 eeprom->magic);
3941 return -EFAULT;
3942 }
3943
3944 while (len) {
3945 data = (u32) data_buf[cnt] & 0x000000FF;
3946 if (data) {
3947 valid = (u32) (data << 24);
3948 } else
3949 valid = data;
3950
3951 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3952 DBG_PRINT(ERR_DBG,
3953 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3954 DBG_PRINT(ERR_DBG,
3955 "write into the specified offset\n");
3956 return -EFAULT;
3957 }
3958 cnt++;
3959 len--;
3960 }
3961
3962 return 0;
3963}
3964
3965/**
 3966 * s2io_register_test - reads and writes into all clock domains.
 3967 * @sp : private member of the device structure, which is a pointer to the
 3968 * s2io_nic structure.
 3969 * @data : variable that returns the result of each of the tests
 3970 * conducted by the driver.
 3971 * Description:
 3972 * Read and write into all clock domains. The NIC has 3 clock domains;
 3973 * verify that registers in all three regions are accessible.
3974 * Return value:
3975 * 0 on success.
3976 */
3977
3978static int s2io_register_test(nic_t * sp, uint64_t * data)
3979{
3980 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3981 u64 val64 = 0;
3982 int fail = 0;
3983
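	/*
	 * The reads below are checked against known constant values in the
	 * different clock domains; the xmsi_data write/read-backs verify
	 * write access.
	 */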
3984 val64 = readq(&bar0->pif_rd_swapper_fb);
3985 if (val64 != 0x123456789abcdefULL) {
3986 fail = 1;
3987 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3988 }
3989
3990 val64 = readq(&bar0->rmac_pause_cfg);
3991 if (val64 != 0xc000ffff00000000ULL) {
3992 fail = 1;
3993 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3994 }
3995
3996 val64 = readq(&bar0->rx_queue_cfg);
3997 if (val64 != 0x0808080808080808ULL) {
3998 fail = 1;
3999 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4000 }
4001
4002 val64 = readq(&bar0->xgxs_efifo_cfg);
4003 if (val64 != 0x000000001923141EULL) {
4004 fail = 1;
4005 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4006 }
4007
4008 val64 = 0x5A5A5A5A5A5A5A5AULL;
4009 writeq(val64, &bar0->xmsi_data);
4010 val64 = readq(&bar0->xmsi_data);
4011 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4012 fail = 1;
4013 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4014 }
4015
4016 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4017 writeq(val64, &bar0->xmsi_data);
4018 val64 = readq(&bar0->xmsi_data);
4019 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4020 fail = 1;
4021 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4022 }
4023
4024 *data = fail;
4025 return 0;
4026}
4027
4028/**
 4029 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
 4030 * @sp : private member of the device structure, which is a pointer to the
 4031 * s2io_nic structure.
 4032 * @data: variable that returns the result of each of the tests conducted by
 4033 * the driver.
 4034 * Description:
 4035 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
 4036 * register.
4037 * Return value:
4038 * 0 on success.
4039 */
4040
4041static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4042{
4043 int fail = 0;
4044 u32 ret_data;
4045
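	/*
	 * Writes to protected offsets (0x0, 0x7C, 0x80, 0xFC, 0x100, 0x4EC)
	 * are expected to fail, so a successful write there counts as an
	 * error. Offsets 0x4F0 and 0x7FC are writable, verified by read-back
	 * and then restored to 0xFFFFFFFF.
	 */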
4046 /* Test Write Error at offset 0 */
4047 if (!write_eeprom(sp, 0, 0, 3))
4048 fail = 1;
4049
4050 /* Test Write at offset 4f0 */
4051 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4052 fail = 1;
4053 if (read_eeprom(sp, 0x4F0, &ret_data))
4054 fail = 1;
4055
4056 if (ret_data != 0x01234567)
4057 fail = 1;
4058
 4059	/* Reset the EEPROM data back to 0xFFFFFFFF */
4060 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4061
4062 /* Test Write Request Error at offset 0x7c */
4063 if (!write_eeprom(sp, 0x07C, 0, 3))
4064 fail = 1;
4065
4066 /* Test Write Request at offset 0x7fc */
4067 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4068 fail = 1;
4069 if (read_eeprom(sp, 0x7FC, &ret_data))
4070 fail = 1;
4071
4072 if (ret_data != 0x01234567)
4073 fail = 1;
4074
 4075	/* Reset the EEPROM data back to 0xFFFFFFFF */
4076 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4077
4078 /* Test Write Error at offset 0x80 */
4079 if (!write_eeprom(sp, 0x080, 0, 3))
4080 fail = 1;
4081
4082 /* Test Write Error at offset 0xfc */
4083 if (!write_eeprom(sp, 0x0FC, 0, 3))
4084 fail = 1;
4085
4086 /* Test Write Error at offset 0x100 */
4087 if (!write_eeprom(sp, 0x100, 0, 3))
4088 fail = 1;
4089
4090 /* Test Write Error at offset 4ec */
4091 if (!write_eeprom(sp, 0x4EC, 0, 3))
4092 fail = 1;
4093
4094 *data = fail;
4095 return 0;
4096}
4097
4098/**
 4099 * s2io_bist_test - invokes the MemBist test of the card.
 4100 * @sp : private member of the device structure, which is a pointer to the
 4101 * s2io_nic structure.
 4102 * @data: variable that returns the result of each of the tests conducted by
 4103 * the driver.
 4104 * Description:
 4105 * This invokes the MemBist test of the card. We give around
 4106 * 2 secs time for the test to complete. If it's still not complete
 4107 * within this period, we consider that the test failed.
 4108 * Return value:
4109 * 0 on success and -1 on failure.
4110 */
4111
4112static int s2io_bist_test(nic_t * sp, uint64_t * data)
4113{
4114 u8 bist = 0;
4115 int cnt = 0, ret = -1;
4116
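	/*
	 * Start the PCI BIST and poll for up to ~2 seconds for the device to
	 * clear the start bit; the remaining bits then hold the completion
	 * code (0 means pass).
	 */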
4117 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4118 bist |= PCI_BIST_START;
4119 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4120
4121 while (cnt < 20) {
4122 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4123 if (!(bist & PCI_BIST_START)) {
4124 *data = (bist & PCI_BIST_CODE_MASK);
4125 ret = 0;
4126 break;
4127 }
4128 msleep(100);
4129 cnt++;
4130 }
4131
4132 return ret;
4133}
4134
4135/**
 4136 * s2io_link_test - verifies the link state of the nic
 4137 * @sp : private member of the device structure, which is a pointer to the
 4138 * s2io_nic structure.
 4139 * @data: variable that returns the result of each of the tests conducted by
 4140 * the driver.
 4141 * Description:
 4142 * The function verifies the link state of the NIC and updates the input
4143 * argument 'data' appropriately.
4144 * Return value:
4145 * 0 on success.
4146 */
4147
4148static int s2io_link_test(nic_t * sp, uint64_t * data)
4149{
4150 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4151 u64 val64;
4152
4153 val64 = readq(&bar0->adapter_status);
4154 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4155 *data = 1;
4156
4157 return 0;
4158}
4159
4160/**
 4161 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 4162 * @sp : private member of the device structure, which is a pointer to the
 4163 * s2io_nic structure.
 4164 * @data : variable that returns the result of each of the tests
 4165 * conducted by the driver.
 4166 * Description:
 4167 * This is one of the offline tests that verifies the read and write
 4168 * access to the RldRam chip on the NIC.
4169 * Return value:
4170 * 0 on success.
4171 */
4172
4173static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4174{
4175 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4176 u64 val64;
4177 int cnt, iteration = 0, test_pass = 0;
4178
4179 val64 = readq(&bar0->adapter_control);
4180 val64 &= ~ADAPTER_ECC_EN;
4181 writeq(val64, &bar0->adapter_control);
4182
4183 val64 = readq(&bar0->mc_rldram_test_ctrl);
4184 val64 |= MC_RLDRAM_TEST_MODE;
4185 writeq(val64, &bar0->mc_rldram_test_ctrl);
4186
4187 val64 = readq(&bar0->mc_rldram_mrs);
4188 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4189 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4190
4191 val64 |= MC_RLDRAM_MRS_ENABLE;
4192 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4193
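	/*
	 * Two iterations: write test patterns (complemented on the second
	 * pass) into RLDRAM, trigger a write pass and then a read pass, and
	 * check the TEST_PASS bit after each run.
	 */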
4194 while (iteration < 2) {
4195 val64 = 0x55555555aaaa0000ULL;
4196 if (iteration == 1) {
4197 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4198 }
4199 writeq(val64, &bar0->mc_rldram_test_d0);
4200
4201 val64 = 0xaaaa5a5555550000ULL;
4202 if (iteration == 1) {
4203 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4204 }
4205 writeq(val64, &bar0->mc_rldram_test_d1);
4206
4207 val64 = 0x55aaaaaaaa5a0000ULL;
4208 if (iteration == 1) {
4209 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4210 }
4211 writeq(val64, &bar0->mc_rldram_test_d2);
4212
4213 val64 = (u64) (0x0000003fffff0000ULL);
4214 writeq(val64, &bar0->mc_rldram_test_add);
4215
4216
4217 val64 = MC_RLDRAM_TEST_MODE;
4218 writeq(val64, &bar0->mc_rldram_test_ctrl);
4219
4220 val64 |=
4221 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4222 MC_RLDRAM_TEST_GO;
4223 writeq(val64, &bar0->mc_rldram_test_ctrl);
4224
4225 for (cnt = 0; cnt < 5; cnt++) {
4226 val64 = readq(&bar0->mc_rldram_test_ctrl);
4227 if (val64 & MC_RLDRAM_TEST_DONE)
4228 break;
4229 msleep(200);
4230 }
4231
4232 if (cnt == 5)
4233 break;
4234
4235 val64 = MC_RLDRAM_TEST_MODE;
4236 writeq(val64, &bar0->mc_rldram_test_ctrl);
4237
4238 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4239 writeq(val64, &bar0->mc_rldram_test_ctrl);
4240
4241 for (cnt = 0; cnt < 5; cnt++) {
4242 val64 = readq(&bar0->mc_rldram_test_ctrl);
4243 if (val64 & MC_RLDRAM_TEST_DONE)
4244 break;
4245 msleep(500);
4246 }
4247
4248 if (cnt == 5)
4249 break;
4250
4251 val64 = readq(&bar0->mc_rldram_test_ctrl);
4252 if (val64 & MC_RLDRAM_TEST_PASS)
4253 test_pass = 1;
4254
4255 iteration++;
4256 }
4257
4258 if (!test_pass)
4259 *data = 1;
4260 else
4261 *data = 0;
4262
4263 return 0;
4264}
4265
4266/**
 4267 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4268 * @sp : private member of the device structure, which is a pointer to the
4269 * s2io_nic structure.
4270 * @ethtest : pointer to a ethtool command specific structure that will be
4271 * returned to the user.
 4272 * @data : variable that returns the result of each of the tests
 4273 * conducted by the driver.
4274 * Description:
4275 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4276 * the health of the card.
4277 * Return value:
4278 * void
4279 */
4280
4281static void s2io_ethtool_test(struct net_device *dev,
4282 struct ethtool_test *ethtest,
4283 uint64_t * data)
4284{
4285 nic_t *sp = dev->priv;
4286 int orig_state = netif_running(sp->dev);
4287
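	/*
	 * Result slots: data[0] registers, data[1] EEPROM, data[2] link,
	 * data[3] RLDRAM, data[4] BIST.
	 */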
4288 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4289 /* Offline Tests. */
20346722 4290 if (orig_state)
1da177e4 4291 s2io_close(sp->dev);
1da177e4
LT
4292
4293 if (s2io_register_test(sp, &data[0]))
4294 ethtest->flags |= ETH_TEST_FL_FAILED;
4295
4296 s2io_reset(sp);
1da177e4
LT
4297
4298 if (s2io_rldram_test(sp, &data[3]))
4299 ethtest->flags |= ETH_TEST_FL_FAILED;
4300
4301 s2io_reset(sp);
1da177e4
LT
4302
4303 if (s2io_eeprom_test(sp, &data[1]))
4304 ethtest->flags |= ETH_TEST_FL_FAILED;
4305
4306 if (s2io_bist_test(sp, &data[4]))
4307 ethtest->flags |= ETH_TEST_FL_FAILED;
4308
4309 if (orig_state)
4310 s2io_open(sp->dev);
4311
4312 data[2] = 0;
4313 } else {
4314 /* Online Tests. */
4315 if (!orig_state) {
4316 DBG_PRINT(ERR_DBG,
4317 "%s: is not up, cannot run test\n",
4318 dev->name);
4319 data[0] = -1;
4320 data[1] = -1;
4321 data[2] = -1;
4322 data[3] = -1;
4323 data[4] = -1;
4324 }
4325
4326 if (s2io_link_test(sp, &data[2]))
4327 ethtest->flags |= ETH_TEST_FL_FAILED;
4328
4329 data[0] = 0;
4330 data[1] = 0;
4331 data[3] = 0;
4332 data[4] = 0;
4333 }
4334}
4335
4336static void s2io_get_ethtool_stats(struct net_device *dev,
4337 struct ethtool_stats *estats,
4338 u64 * tmp_stats)
4339{
4340 int i = 0;
4341 nic_t *sp = dev->priv;
4342 StatInfo_t *stat_info = sp->mac_control.stats_info;
4343
7ba013ac 4344 s2io_updt_stats(sp);
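	/*
	 * Most MAC counters are a 32-bit value plus a 32-bit overflow
	 * counter; each pair is combined into one 64-bit statistic here.
	 */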
4345 tmp_stats[i++] =
4346 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4347 le32_to_cpu(stat_info->tmac_frms);
4348 tmp_stats[i++] =
4349 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4350 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 4351 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
4352 tmp_stats[i++] =
4353 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4354 le32_to_cpu(stat_info->tmac_mcst_frms);
4355 tmp_stats[i++] =
4356 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4357 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 4358 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
541ae68f
K
4359 tmp_stats[i++] =
4360 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4361 le32_to_cpu(stat_info->tmac_any_err_frms);
1da177e4 4362 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
4363 tmp_stats[i++] =
4364 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4365 le32_to_cpu(stat_info->tmac_vld_ip);
4366 tmp_stats[i++] =
4367 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4368 le32_to_cpu(stat_info->tmac_drop_ip);
4369 tmp_stats[i++] =
4370 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4371 le32_to_cpu(stat_info->tmac_icmp);
4372 tmp_stats[i++] =
4373 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4374 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 4375 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
4376 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4377 le32_to_cpu(stat_info->tmac_udp);
4378 tmp_stats[i++] =
4379 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4380 le32_to_cpu(stat_info->rmac_vld_frms);
4381 tmp_stats[i++] =
4382 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4383 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
4384 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4385 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
4386 tmp_stats[i++] =
4387 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4388 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4389 tmp_stats[i++] =
4390 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4391 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4
LT
4392 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4393 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4394 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
541ae68f
K
4395 tmp_stats[i++] =
4396 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4397 le32_to_cpu(stat_info->rmac_discarded_frms);
4398 tmp_stats[i++] =
4399 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4400 le32_to_cpu(stat_info->rmac_usized_frms);
4401 tmp_stats[i++] =
4402 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4403 le32_to_cpu(stat_info->rmac_osized_frms);
4404 tmp_stats[i++] =
4405 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4406 le32_to_cpu(stat_info->rmac_frag_frms);
4407 tmp_stats[i++] =
4408 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4409 le32_to_cpu(stat_info->rmac_jabber_frms);
4410 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4411 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
4412 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4413 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
541ae68f
K
4414 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4415 le32_to_cpu(stat_info->rmac_drop_ip);
4416 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4417 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 4418 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
541ae68f
K
4419 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4420 le32_to_cpu(stat_info->rmac_udp);
4421 tmp_stats[i++] =
4422 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4423 le32_to_cpu(stat_info->rmac_err_drp_udp);
4424 tmp_stats[i++] =
4425 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4426 le32_to_cpu(stat_info->rmac_pause_cnt);
4427 tmp_stats[i++] =
4428 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4429 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 4430 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac
K
4431 tmp_stats[i++] = 0;
4432 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4433 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4434}
4435
20346722 4436int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4437{
4438 return (XENA_REG_SPACE);
4439}
4440
4441
20346722 4442u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4443{
4444 nic_t *sp = dev->priv;
4445
4446 return (sp->rx_csum);
4447}
20346722 4448int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4449{
4450 nic_t *sp = dev->priv;
4451
4452 if (data)
4453 sp->rx_csum = 1;
4454 else
4455 sp->rx_csum = 0;
4456
4457 return 0;
4458}
20346722 4459int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4460{
4461 return (XENA_EEPROM_SPACE);
4462}
4463
20346722 4464int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4465{
4466 return (S2IO_TEST_LEN);
4467}
20346722
K
4468void s2io_ethtool_get_strings(struct net_device *dev,
4469 u32 stringset, u8 * data)
1da177e4
LT
4470{
4471 switch (stringset) {
4472 case ETH_SS_TEST:
4473 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4474 break;
4475 case ETH_SS_STATS:
4476 memcpy(data, &ethtool_stats_keys,
4477 sizeof(ethtool_stats_keys));
4478 }
4479}
1da177e4
LT
4480static int s2io_ethtool_get_stats_count(struct net_device *dev)
4481{
4482 return (S2IO_STAT_LEN);
4483}
4484
20346722 4485int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4486{
4487 if (data)
4488 dev->features |= NETIF_F_IP_CSUM;
4489 else
4490 dev->features &= ~NETIF_F_IP_CSUM;
4491
4492 return 0;
4493}
4494
4495
4496static struct ethtool_ops netdev_ethtool_ops = {
4497 .get_settings = s2io_ethtool_gset,
4498 .set_settings = s2io_ethtool_sset,
4499 .get_drvinfo = s2io_ethtool_gdrvinfo,
4500 .get_regs_len = s2io_ethtool_get_regs_len,
4501 .get_regs = s2io_ethtool_gregs,
4502 .get_link = ethtool_op_get_link,
4503 .get_eeprom_len = s2io_get_eeprom_len,
4504 .get_eeprom = s2io_ethtool_geeprom,
4505 .set_eeprom = s2io_ethtool_seeprom,
4506 .get_pauseparam = s2io_ethtool_getpause_data,
4507 .set_pauseparam = s2io_ethtool_setpause_data,
4508 .get_rx_csum = s2io_ethtool_get_rx_csum,
4509 .set_rx_csum = s2io_ethtool_set_rx_csum,
4510 .get_tx_csum = ethtool_op_get_tx_csum,
4511 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4512 .get_sg = ethtool_op_get_sg,
4513 .set_sg = ethtool_op_set_sg,
4514#ifdef NETIF_F_TSO
4515 .get_tso = ethtool_op_get_tso,
4516 .set_tso = ethtool_op_set_tso,
4517#endif
4518 .self_test_count = s2io_ethtool_self_test_count,
4519 .self_test = s2io_ethtool_test,
4520 .get_strings = s2io_ethtool_get_strings,
4521 .phys_id = s2io_ethtool_idnic,
4522 .get_stats_count = s2io_ethtool_get_stats_count,
4523 .get_ethtool_stats = s2io_get_ethtool_stats
4524};
4525
4526/**
20346722 4527 * s2io_ioctl - Entry point for the Ioctl
4528 * @dev : Device pointer.
4529 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4530 * a proprietary structure used to pass information to the driver.
4531 * @cmd : This is used to distinguish between the different commands that
4532 * can be passed to the IOCTL functions.
4533 * Description:
 4534 * Currently no special functionality is supported in IOCTL, hence the
 4535 * function always returns -EOPNOTSUPP.
4536 */
4537
20346722 4538int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4539{
4540 return -EOPNOTSUPP;
4541}
4542
4543/**
4544 * s2io_change_mtu - entry point to change MTU size for the device.
4545 * @dev : device pointer.
4546 * @new_mtu : the new MTU size for the device.
4547 * Description: A driver entry point to change MTU size for the device.
 4548 * If the interface is running, the card is brought down and back up to apply the new MTU.
4549 * Return value:
4550 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4551 * file on failure.
4552 */
4553
20346722 4554int s2io_change_mtu(struct net_device *dev, int new_mtu)
4555{
4556 nic_t *sp = dev->priv;
1da177e4
LT
4557
4558 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4559 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4560 dev->name);
4561 return -EPERM;
4562 }
4563
1da177e4 4564 dev->mtu = new_mtu;
d8892c6e
K
4565 if (netif_running(dev)) {
4566 s2io_card_down(sp);
4567 netif_stop_queue(dev);
4568 if (s2io_card_up(sp)) {
4569 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4570 __FUNCTION__);
4571 }
4572 if (netif_queue_stopped(dev))
4573 netif_wake_queue(dev);
4574 } else { /* Device is down */
4575 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4576 u64 val64 = new_mtu;
4577
4578 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4579 }
1da177e4
LT
4580
4581 return 0;
4582}
4583
4584/**
4585 * s2io_tasklet - Bottom half of the ISR.
4586 * @dev_adr : address of the device structure in dma_addr_t format.
4587 * Description:
4588 * This is the tasklet or the bottom half of the ISR. This is
20346722 4589 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4590 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4591 * be pushed into the tasklet. For now the tasklet is used only to
4592 * replenish the Rx buffers in the Rx buffer descriptors.
4593 * Return value:
4594 * void.
4595 */
4596
4597static void s2io_tasklet(unsigned long dev_addr)
4598{
4599 struct net_device *dev = (struct net_device *) dev_addr;
4600 nic_t *sp = dev->priv;
4601 int i, ret;
4602 mac_info_t *mac_control;
4603 struct config_param *config;
4604
4605 mac_control = &sp->mac_control;
4606 config = &sp->config;
4607
4608 if (!TASKLET_IN_USE) {
4609 for (i = 0; i < config->rx_ring_num; i++) {
4610 ret = fill_rx_buffers(sp, i);
4611 if (ret == -ENOMEM) {
4612 DBG_PRINT(ERR_DBG, "%s: Out of ",
4613 dev->name);
4614 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4615 break;
4616 } else if (ret == -EFILL) {
4617 DBG_PRINT(ERR_DBG,
4618 "%s: Rx Ring %d is full\n",
4619 dev->name, i);
4620 break;
4621 }
4622 }
4623 clear_bit(0, (&sp->tasklet_status));
4624 }
4625}
4626
4627/**
 4628 * s2io_set_link - Set the link status
 4629 * @data: long pointer to device private structure
4630 * Description: Sets the link status for the adapter
4631 */
4632
4633static void s2io_set_link(unsigned long data)
4634{
4635 nic_t *nic = (nic_t *) data;
4636 struct net_device *dev = nic->dev;
4637 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4638 register u64 val64;
4639 u16 subid;
4640
4641 if (test_and_set_bit(0, &(nic->link_state))) {
4642 /* The card is being reset, no point doing anything */
4643 return;
4644 }
4645
4646 subid = nic->pdev->subsystem_device;
20346722
K
4647 /*
 4648	 * Allow a small delay for the NIC's self-initiated
 4649	 * cleanup to complete.
4650 */
4651 msleep(100);
4652
4653 val64 = readq(&bar0->adapter_status);
20346722 4654 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4655 if (LINK_IS_UP(val64)) {
4656 val64 = readq(&bar0->adapter_control);
4657 val64 |= ADAPTER_CNTL_EN;
4658 writeq(val64, &bar0->adapter_control);
541ae68f
K
4659 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4660 subid)) {
4661 val64 = readq(&bar0->gpio_control);
4662 val64 |= GPIO_CTRL_GPIO_0;
4663 writeq(val64, &bar0->gpio_control);
4664 val64 = readq(&bar0->gpio_control);
4665 } else {
4666 val64 |= ADAPTER_LED_ON;
4667 writeq(val64, &bar0->adapter_control);
4668 }
4669 val64 = readq(&bar0->adapter_status);
4670 if (!LINK_IS_UP(val64)) {
4671 DBG_PRINT(ERR_DBG, "%s:", dev->name);
 4672				DBG_PRINT(ERR_DBG, " Link down ");
4673 DBG_PRINT(ERR_DBG, "after ");
4674 DBG_PRINT(ERR_DBG, "enabling ");
4675 DBG_PRINT(ERR_DBG, "device \n");
4676 }
4677 if (nic->device_enabled_once == FALSE) {
4678 nic->device_enabled_once = TRUE;
4679 }
4680 s2io_link(nic, LINK_UP);
4681 } else {
4682 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4683 subid)) {
4684 val64 = readq(&bar0->gpio_control);
4685 val64 &= ~GPIO_CTRL_GPIO_0;
4686 writeq(val64, &bar0->gpio_control);
4687 val64 = readq(&bar0->gpio_control);
4688 }
4689 s2io_link(nic, LINK_DOWN);
4690 }
4691 } else { /* NIC is not Quiescent. */
4692 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4693 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4694 netif_stop_queue(dev);
4695 }
4696 clear_bit(0, &(nic->link_state));
4697}
4698
4699static void s2io_card_down(nic_t * sp)
4700{
4701 int cnt = 0;
4702 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4703 unsigned long flags;
4704 register u64 val64 = 0;
4705
25fff88e 4706 del_timer_sync(&sp->alarm_timer);
1da177e4 4707 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4708 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4709 msleep(50);
20346722 4710 }
1da177e4
LT
4711 atomic_set(&sp->card_state, CARD_DOWN);
4712
4713 /* disable Tx and Rx traffic on the NIC */
4714 stop_nic(sp);
4715
4716 /* Kill tasklet. */
4717 tasklet_kill(&sp->task);
4718
4719 /* Check if the device is Quiescent and then Reset the NIC */
4720 do {
4721 val64 = readq(&bar0->adapter_status);
20346722 4722 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4723 break;
4724 }
4725
4726 msleep(50);
4727 cnt++;
4728 if (cnt == 10) {
4729 DBG_PRINT(ERR_DBG,
4730 "s2io_close:Device not Quiescent ");
4731 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4732 (unsigned long long) val64);
4733 break;
4734 }
4735 } while (1);
1da177e4
LT
4736 s2io_reset(sp);
4737
7ba013ac
K
4738 /* Waiting till all Interrupt handlers are complete */
4739 cnt = 0;
4740 do {
4741 msleep(10);
4742 if (!atomic_read(&sp->isr_cnt))
4743 break;
4744 cnt++;
4745 } while(cnt < 5);
4746
4747 spin_lock_irqsave(&sp->tx_lock, flags);
4748 /* Free all Tx buffers */
1da177e4 4749 free_tx_buffers(sp);
7ba013ac
K
4750 spin_unlock_irqrestore(&sp->tx_lock, flags);
4751
4752 /* Free all Rx buffers */
4753 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4754 free_rx_buffers(sp);
7ba013ac 4755 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4756
1da177e4
LT
4757 clear_bit(0, &(sp->link_state));
4758}
4759
4760static int s2io_card_up(nic_t * sp)
4761{
4762 int i, ret;
4763 mac_info_t *mac_control;
4764 struct config_param *config;
4765 struct net_device *dev = (struct net_device *) sp->dev;
4766
4767 /* Initialize the H/W I/O registers */
4768 if (init_nic(sp) != 0) {
4769 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4770 dev->name);
4771 return -ENODEV;
4772 }
4773
20346722
K
4774 /*
4775 * Initializing the Rx buffers. For now we are considering only 1
4776 * Rx ring and initializing buffers into 30 Rx blocks
4777 */
4778 mac_control = &sp->mac_control;
4779 config = &sp->config;
4780
4781 for (i = 0; i < config->rx_ring_num; i++) {
4782 if ((ret = fill_rx_buffers(sp, i))) {
4783 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4784 dev->name);
4785 s2io_reset(sp);
4786 free_rx_buffers(sp);
4787 return -ENOMEM;
4788 }
4789 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4790 atomic_read(&sp->rx_bufs_left[i]));
4791 }
4792
4793 /* Setting its receive mode */
4794 s2io_set_multicast(dev);
4795
4796 /* Enable tasklet for the device */
4797 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4798
4799 /* Enable Rx Traffic and interrupts on the NIC */
4800 if (start_nic(sp)) {
4801 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4802 tasklet_kill(&sp->task);
4803 s2io_reset(sp);
4804 free_irq(dev->irq, dev);
4805 free_rx_buffers(sp);
4806 return -ENODEV;
4807 }
4808
25fff88e
K
4809 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4810
1da177e4
LT
4811 atomic_set(&sp->card_state, CARD_UP);
4812 return 0;
4813}
4814
20346722 4815/**
4816 * s2io_restart_nic - Resets the NIC.
4817 * @data : long pointer to the device private structure
4818 * Description:
4819 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4820 * function after 0.5 secs to reset the NIC. The idea is to reduce
4821 * the run time of the watch dog routine which is run holding a
4822 * spin lock.
4823 */
4824
4825static void s2io_restart_nic(unsigned long data)
4826{
4827 struct net_device *dev = (struct net_device *) data;
4828 nic_t *sp = dev->priv;
4829
4830 s2io_card_down(sp);
4831 if (s2io_card_up(sp)) {
4832 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4833 dev->name);
4834 }
4835 netif_wake_queue(dev);
4836 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4837 dev->name);
20346722 4838
1da177e4
LT
4839}
4840
20346722
K
4841/**
4842 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
4843 * @dev : Pointer to net device structure
4844 * Description:
4845 * This function is triggered if the Tx Queue is stopped
4846 * for a pre-defined amount of time when the Interface is still up.
4847 * If the Interface is jammed in such a situation, the hardware is
4848 * reset (by s2io_close) and restarted again (by s2io_open) to
4849 * overcome any problem that might have been caused in the hardware.
4850 * Return value:
4851 * void
4852 */
4853
4854static void s2io_tx_watchdog(struct net_device *dev)
4855{
4856 nic_t *sp = dev->priv;
4857
4858 if (netif_carrier_ok(dev)) {
4859 schedule_work(&sp->rst_timer_task);
4860 }
4861}
4862
4863/**
4864 * rx_osm_handler - To perform some OS related operations on SKB.
4865 * @sp: private member of the device structure,pointer to s2io_nic structure.
4866 * @skb : the socket buffer pointer.
4867 * @len : length of the packet
4868 * @cksum : FCS checksum of the frame.
4869 * @ring_no : the ring from which this RxD was extracted.
20346722 4870 * Description:
 4871 * This function is called by the Rx interrupt service routine to perform
4872 * some OS related operations on the SKB before passing it to the upper
4873 * layers. It mainly checks if the checksum is OK, if so adds it to the
4874 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4875 * to the upper layer. If the checksum is wrong, it increments the Rx
4876 * packet error count, frees the SKB and returns error.
4877 * Return value:
4878 * SUCCESS on success and -1 on failure.
4879 */
20346722 4880static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 4881{
20346722 4882 nic_t *sp = ring_data->nic;
1da177e4 4883 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
4884 struct sk_buff *skb = (struct sk_buff *)
4885 ((unsigned long) rxdp->Host_Control);
4886 int ring_no = ring_data->ring_no;
1da177e4
LT
4887 u16 l3_csum, l4_csum;
4888#ifdef CONFIG_2BUFF_MODE
20346722
K
4889 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4890 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4891 int get_block = ring_data->rx_curr_get_info.block_index;
4892 int get_off = ring_data->rx_curr_get_info.offset;
4893 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 4894 unsigned char *buff;
20346722
K
4895#else
 4896	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
1da177e4 4897#endif
20346722
K
4898 skb->dev = dev;
4899 if (rxdp->Control_1 & RXD_T_CODE) {
4900 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4901 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4902 dev->name, err);
1ddc50d4
K
4903 dev_kfree_skb(skb);
4904 sp->stats.rx_crc_errors++;
4905 atomic_dec(&sp->rx_bufs_left[ring_no]);
4906 rxdp->Host_Control = 0;
4907 return 0;
20346722 4908 }
1da177e4 4909
20346722
K
4910 /* Updating statistics */
4911 rxdp->Host_Control = 0;
4912 sp->rx_pkt_count++;
4913 sp->stats.rx_packets++;
4914#ifndef CONFIG_2BUFF_MODE
4915 sp->stats.rx_bytes += len;
4916#else
4917 sp->stats.rx_bytes += buf0_len + buf2_len;
4918#endif
4919
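	/*
	 * In 2-buffer mode the headers arrive in a separate buffer (ba->ba_0)
	 * and are copied in front of the payload already in the skb; in
	 * 1-buffer mode the whole frame is already in the skb.
	 */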
4920#ifndef CONFIG_2BUFF_MODE
4921 skb_put(skb, len);
4922#else
4923 buff = skb_push(skb, buf0_len);
4924 memcpy(buff, ba->ba_0, buf0_len);
4925 skb_put(skb, buf2_len);
4926#endif
4927
4928 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4929 (sp->rx_csum)) {
4930 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
4931 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4932 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 4933 /*
1da177e4
LT
4934 * NIC verifies if the Checksum of the received
4935 * frame is Ok or not and accordingly returns
4936 * a flag in the RxD.
4937 */
4938 skb->ip_summed = CHECKSUM_UNNECESSARY;
4939 } else {
20346722
K
4940 /*
4941 * Packet with erroneous checksum, let the
4942 * upper layers deal with it.
4943 */
4944 skb->ip_summed = CHECKSUM_NONE;
4945 }
4946 } else {
4947 skb->ip_summed = CHECKSUM_NONE;
4948 }
4949
1da177e4 4950 skb->protocol = eth_type_trans(skb, dev);
1da177e4 4951#ifdef CONFIG_S2IO_NAPI
be3a6b02
K
4952 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4953 /* Queueing the vlan frame to the upper layer */
4954 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
4955 RXD_GET_VLAN_TAG(rxdp->Control_2));
4956 } else {
4957 netif_receive_skb(skb);
4958 }
1da177e4 4959#else
be3a6b02
K
4960 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4961 /* Queueing the vlan frame to the upper layer */
4962 vlan_hwaccel_rx(skb, sp->vlgrp,
4963 RXD_GET_VLAN_TAG(rxdp->Control_2));
4964 } else {
4965 netif_rx(skb);
4966 }
1da177e4 4967#endif
1da177e4 4968 dev->last_rx = jiffies;
1da177e4 4969 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
4970 return SUCCESS;
4971}
4972
4973/**
4974 * s2io_link - stops/starts the Tx queue.
4975 * @sp : private member of the device structure, which is a pointer to the
4976 * s2io_nic structure.
 4977 * @link : indicates whether link is UP/DOWN.
4978 * Description:
4979 * This function stops/starts the Tx queue depending on whether the link
 4980 * status of the NIC is down or up. This is called by the Alarm
4981 * interrupt handler whenever a link change interrupt comes up.
4982 * Return value:
4983 * void.
4984 */
4985
20346722 4986void s2io_link(nic_t * sp, int link)
4987{
4988 struct net_device *dev = (struct net_device *) sp->dev;
4989
4990 if (link != sp->last_link_state) {
4991 if (link == LINK_DOWN) {
4992 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4993 netif_carrier_off(dev);
4994 } else {
4995 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4996 netif_carrier_on(dev);
4997 }
4998 }
4999 sp->last_link_state = link;
5000}
5001
5002/**
20346722
K
5003 * get_xena_rev_id - to identify revision ID of xena.
5004 * @pdev : PCI Dev structure
5005 * Description:
5006 * Function to identify the Revision ID of xena.
5007 * Return value:
5008 * returns the revision ID of the device.
5009 */
5010
5011int get_xena_rev_id(struct pci_dev *pdev)
5012{
5013 u8 id = 0;
5014 int ret;
5015 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5016 return id;
5017}
5018
5019/**
5020 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5021 * @sp : private member of the device structure, which is a pointer to the
5022 * s2io_nic structure.
5023 * Description:
5024 * This function initializes a few of the PCI and PCI-X configuration registers
5025 * with recommended values.
5026 * Return value:
5027 * void
5028 */
5029
5030static void s2io_init_pci(nic_t * sp)
5031{
20346722 5032 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
5033
5034 /* Enable Data Parity Error Recovery in PCI-X command register. */
5035 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5036 &(pcix_cmd));
1da177e4 5037 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5038 (pcix_cmd | 1));
1da177e4 5039 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5040 &(pcix_cmd));
1da177e4
LT
5041
5042 /* Set the PErr Response bit in PCI command register. */
5043 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5044 pci_write_config_word(sp->pdev, PCI_COMMAND,
5045 (pci_cmd | PCI_COMMAND_PARITY));
5046 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5047
1da177e4 5048 /* Forcibly disabling relaxed ordering capability of the card. */
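	/* Bit 1 of the PCI-X command register is the Enable Relaxed Ordering bit. */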
20346722 5049 pcix_cmd &= 0xfffd;
1da177e4 5050 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5051 pcix_cmd);
1da177e4 5052 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5053 &(pcix_cmd));
1da177e4
LT
5054}
5055
5056MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5057MODULE_LICENSE("GPL");
5058module_param(tx_fifo_num, int, 0);
1da177e4 5059module_param(rx_ring_num, int, 0);
20346722
K
5060module_param_array(tx_fifo_len, uint, NULL, 0);
5061module_param_array(rx_ring_sz, uint, NULL, 0);
20346722 5062module_param_array(rts_frm_len, uint, NULL, 0);
5e25b9dd 5063module_param(use_continuous_tx_intrs, int, 1);
1da177e4
LT
5064module_param(rmac_pause_time, int, 0);
5065module_param(mc_pause_threshold_q0q3, int, 0);
5066module_param(mc_pause_threshold_q4q7, int, 0);
5067module_param(shared_splits, int, 0);
5068module_param(tmac_util_period, int, 0);
5069module_param(rmac_util_period, int, 0);
b6e3f982 5070module_param(bimodal, bool, 0);
1da177e4
LT
5071#ifndef CONFIG_S2IO_NAPI
5072module_param(indicate_max_pkts, int, 0);
5073#endif
20346722 5074
1da177e4 5075/**
 5076 * s2io_init_nic - Initialization of the adapter.
5077 * @pdev : structure containing the PCI related information of the device.
5078 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5079 * Description:
 5080 * The function initializes an adapter identified by the pci_dev structure.
 5081 * All OS related initialization including memory and device structure and
 5082 * initialization of the device private variable is done. Also the swapper
 5083 * control register is initialized to enable read and write into the I/O
5084 * registers of the device.
5085 * Return value:
5086 * returns 0 on success and negative on failure.
5087 */
5088
5089static int __devinit
5090s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5091{
5092 nic_t *sp;
5093 struct net_device *dev;
1da177e4
LT
5094 int i, j, ret;
5095 int dma_flag = FALSE;
5096 u32 mac_up, mac_down;
5097 u64 val64 = 0, tmp64 = 0;
5098 XENA_dev_config_t __iomem *bar0 = NULL;
5099 u16 subid;
5100 mac_info_t *mac_control;
5101 struct config_param *config;
541ae68f 5102 int mode;
1da177e4 5103
20346722
K
5104#ifdef CONFIG_S2IO_NAPI
5105 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5106#endif
1da177e4
LT
5107
5108 if ((ret = pci_enable_device(pdev))) {
5109 DBG_PRINT(ERR_DBG,
5110 "s2io_init_nic: pci_enable_device failed\n");
5111 return ret;
5112 }
5113
1e7f0bd8 5114 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5115 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5116 dma_flag = TRUE;
1da177e4 5117 if (pci_set_consistent_dma_mask
1e7f0bd8 5118 (pdev, DMA_64BIT_MASK)) {
5119 DBG_PRINT(ERR_DBG,
5120 "Unable to obtain 64bit DMA for \
5121 consistent allocations\n");
5122 pci_disable_device(pdev);
5123 return -ENOMEM;
5124 }
1e7f0bd8 5125 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5126 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5127 } else {
5128 pci_disable_device(pdev);
5129 return -ENOMEM;
5130 }
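	/*
	 * Editorial note: the probe first asks for a 64-bit DMA mask and
	 * records success in dma_flag (consulted later to advertise
	 * NETIF_F_HIGHDMA); failing that it falls back to 32-bit DMA, and if
	 * neither mask is accepted the device cannot be used, so the probe
	 * bails out with -ENOMEM.
	 */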
5131
5132 if (pci_request_regions(pdev, s2io_driver_name)) {
5133 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5134 pci_disable_device(pdev);
5135 return -ENODEV;
5136 }
5137
5138 dev = alloc_etherdev(sizeof(nic_t));
5139 if (dev == NULL) {
5140 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5141 pci_disable_device(pdev);
5142 pci_release_regions(pdev);
5143 return -ENODEV;
5144 }
5145
5146 pci_set_master(pdev);
5147 pci_set_drvdata(pdev, dev);
5148 SET_MODULE_OWNER(dev);
5149 SET_NETDEV_DEV(dev, &pdev->dev);
5150
5151 /* Private member variable initialized to s2io NIC structure */
5152 sp = dev->priv;
5153 memset(sp, 0, sizeof(nic_t));
5154 sp->dev = dev;
5155 sp->pdev = pdev;
1da177e4 5156 sp->high_dma_flag = dma_flag;
1da177e4 5157 sp->device_enabled_once = FALSE;
1da177e4 5158
5159 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5160 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5161 sp->device_type = XFRAME_II_DEVICE;
5162 else
5163 sp->device_type = XFRAME_I_DEVICE;
5164
5165 /* Initialize some PCI/PCI-X fields of the NIC. */
5166 s2io_init_pci(sp);
5167
20346722 5168 /*
1da177e4 5169 * Setting the device configuration parameters.
 5170 * Most of these parameters can be specified by the user during
 5171 * module insertion as they are module loadable parameters. If
 5172 * these parameters are not specified during load time, they
5173 * are initialized with default values.
5174 */
5175 mac_control = &sp->mac_control;
5176 config = &sp->config;
5177
5178 /* Tx side parameters. */
5179 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5180 config->tx_fifo_num = tx_fifo_num;
5181 for (i = 0; i < MAX_TX_FIFOS; i++) {
5182 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5183 config->tx_cfg[i].fifo_priority = i;
5184 }
5185
5186 /* mapping the QoS priority to the configured fifos */
5187 for (i = 0; i < MAX_TX_FIFOS; i++)
5188 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5189
5190 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5191 for (i = 0; i < config->tx_fifo_num; i++) {
5192 config->tx_cfg[i].f_no_snoop =
5193 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5194 if (config->tx_cfg[i].fifo_len < 65) {
5195 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5196 break;
5197 }
5198 }
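	/*
	 * Editorial note: utilization-based Tx interrupts are kept only when
	 * every configured FIFO has at least 65 descriptors; otherwise the
	 * driver falls back to per-list interrupts, presumably so completions
	 * on very short FIFOs are signalled promptly.
	 */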
5199 config->max_txds = MAX_SKB_FRAGS;
5200
5201 /* Rx side parameters. */
5202 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5203 config->rx_ring_num = rx_ring_num;
5204 for (i = 0; i < MAX_RX_RINGS; i++) {
5205 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5206 (MAX_RXDS_PER_BLOCK + 1);
5207 config->rx_cfg[i].ring_priority = i;
5208 }
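	/*
	 * Editorial note: rx_ring_sz[] appears to be expressed in receive
	 * descriptor blocks, hence the scaling by (MAX_RXDS_PER_BLOCK + 1);
	 * the extra entry per block is presumably the link descriptor that
	 * chains one block to the next.
	 */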
5209
5210 for (i = 0; i < rx_ring_num; i++) {
5211 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5212 config->rx_cfg[i].f_no_snoop =
5213 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5214 }
5215
5216 /* Setting Mac Control parameters */
5217 mac_control->rmac_pause_time = rmac_pause_time;
5218 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5219 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5220
5221
5222 /* Initialize Ring buffer parameters. */
5223 for (i = 0; i < config->rx_ring_num; i++)
5224 atomic_set(&sp->rx_bufs_left[i], 0);
5225
5226 /* Initialize the number of ISRs currently running */
5227 atomic_set(&sp->isr_cnt, 0);
5228
5229 /* initialize the shared memory used by the NIC and the host */
5230 if (init_shared_mem(sp)) {
5231 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5232 dev->name);
5233 ret = -ENOMEM;
5234 goto mem_alloc_failed;
5235 }
5236
5237 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5238 pci_resource_len(pdev, 0));
5239 if (!sp->bar0) {
5240 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5241 dev->name);
5242 ret = -ENOMEM;
5243 goto bar0_remap_failed;
5244 }
5245
5246 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5247 pci_resource_len(pdev, 2));
5248 if (!sp->bar1) {
5249 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5250 dev->name);
5251 ret = -ENOMEM;
5252 goto bar1_remap_failed;
5253 }
5254
5255 dev->irq = pdev->irq;
5256 dev->base_addr = (unsigned long) sp->bar0;
5257
5258 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5259 for (j = 0; j < MAX_TX_FIFOS; j++) {
5260 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5261 (sp->bar1 + (j * 0x00020000));
5262 }
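	/*
	 * Editorial note: each Tx FIFO is given its own 128 KB (0x20000-byte)
	 * window in BAR1, and tx_FIFO_start[j] records the base of FIFO j's
	 * window, presumably used when posting Tx descriptor lists.
	 */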
5263
5264 /* Driver entry points */
5265 dev->open = &s2io_open;
5266 dev->stop = &s2io_close;
5267 dev->hard_start_xmit = &s2io_xmit;
5268 dev->get_stats = &s2io_get_stats;
5269 dev->set_multicast_list = &s2io_set_multicast;
5270 dev->do_ioctl = &s2io_ioctl;
5271 dev->change_mtu = &s2io_change_mtu;
5272 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5273 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5274 dev->vlan_rx_register = s2io_vlan_rx_register;
5275 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 5276
5277 /*
5278 * will use eth_mac_addr() for dev->set_mac_address
5279 * mac address will be set every time dev->open() is called
5280 */
20346722 5281#if defined(CONFIG_S2IO_NAPI)
1da177e4 5282 dev->poll = s2io_poll;
20346722 5283 dev->weight = 32;
5284#endif
5285
5286 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5287 if (sp->high_dma_flag == TRUE)
5288 dev->features |= NETIF_F_HIGHDMA;
5289#ifdef NETIF_F_TSO
5290 dev->features |= NETIF_F_TSO;
5291#endif
5292
5293 dev->tx_timeout = &s2io_tx_watchdog;
5294 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5295 INIT_WORK(&sp->rst_timer_task,
5296 (void (*)(void *)) s2io_restart_nic, dev);
5297 INIT_WORK(&sp->set_link_task,
5298 (void (*)(void *)) s2io_set_link, sp);
5299
5300 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5301 pci_save_state(sp->pdev);
5302 }
5303
5304 /* Setting swapper control on the NIC, for proper reset operation */
5305 if (s2io_set_swapper(sp)) {
5306 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5307 dev->name);
5308 ret = -EAGAIN;
5309 goto set_swap_failed;
5310 }
5311
 5312 /* Verify that the Herc works in the slot it is placed into */
5313 if (sp->device_type & XFRAME_II_DEVICE) {
5314 mode = s2io_verify_pci_mode(sp);
5315 if (mode < 0) {
5316 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5317 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5318 ret = -EBADSLT;
5319 goto set_swap_failed;
5320 }
5321 }
5322
5323 /* Not needed for Herc */
5324 if (sp->device_type & XFRAME_I_DEVICE) {
5325 /*
5326 * Fix for all "FFs" MAC address problems observed on
5327 * Alpha platforms
5328 */
5329 fix_mac_address(sp);
5330 s2io_reset(sp);
5331 }
5332
5333 /*
5334 * MAC address initialization.
5335 * For now only one mac address will be read and used.
5336 */
5337 bar0 = sp->bar0;
5338 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5339 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5340 writeq(val64, &bar0->rmac_addr_cmd_mem);
5341 wait_for_cmd_complete(sp);
5342
5343 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5344 mac_down = (u32) tmp64;
5345 mac_up = (u32) (tmp64 >> 32);
5346
5347 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5348
5349 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5350 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5351 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5352 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5353 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5354 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
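	/*
	 * Editorial note: rmac_addr_data0_mem returns the station address as a
	 * 64-bit value; the shifts above unpack its two 32-bit halves
	 * (mac_up/mac_down) into the byte order expected in
	 * def_mac_addr[0].mac_addr[0..5].
	 */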
5355
5356 /* Set the factory defined MAC address initially */
5357 dev->addr_len = ETH_ALEN;
5358 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5359
5360 /*
20346722 5361 * Initialize the tasklet status and link state flags
541ae68f 5362 * and the card state parameter
5363 */
5364 atomic_set(&(sp->card_state), 0);
5365 sp->tasklet_status = 0;
5366 sp->link_state = 0;
5367
5368 /* Initialize spinlocks */
5369 spin_lock_init(&sp->tx_lock);
5370#ifndef CONFIG_S2IO_NAPI
5371 spin_lock_init(&sp->put_lock);
5372#endif
7ba013ac 5373 spin_lock_init(&sp->rx_lock);
1da177e4 5374
5375 /*
5376 * SXE-002: Configure link and activity LED to init state
5377 * on driver load.
5378 */
5379 subid = sp->pdev->subsystem_device;
5380 if ((subid & 0xFF) >= 0x07) {
5381 val64 = readq(&bar0->gpio_control);
5382 val64 |= 0x0000800000000000ULL;
5383 writeq(val64, &bar0->gpio_control);
5384 val64 = 0x0411040400000000ULL;
5385 writeq(val64, (void __iomem *) bar0 + 0x2700);
5386 val64 = readq(&bar0->gpio_control);
5387 }
5388
5389 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5390
5391 if (register_netdev(dev)) {
5392 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5393 ret = -ENODEV;
5394 goto register_failed;
5395 }
5396
5397 if (sp->device_type & XFRAME_II_DEVICE) {
5398 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5399 dev->name);
5400 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5401 get_xena_rev_id(sp->pdev),
5402 s2io_driver_version);
5403 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5404 sp->def_mac_addr[0].mac_addr[0],
5405 sp->def_mac_addr[0].mac_addr[1],
5406 sp->def_mac_addr[0].mac_addr[2],
5407 sp->def_mac_addr[0].mac_addr[3],
5408 sp->def_mac_addr[0].mac_addr[4],
5409 sp->def_mac_addr[0].mac_addr[5]);
5410 mode = s2io_print_pci_mode(sp);
5411 if (mode < 0) {
5412 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5413 ret = -EBADSLT;
5414 goto set_swap_failed;
5415 }
5416 } else {
5417 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5418 dev->name);
5419 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5420 get_xena_rev_id(sp->pdev),
5421 s2io_driver_version);
5422 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5423 sp->def_mac_addr[0].mac_addr[0],
5424 sp->def_mac_addr[0].mac_addr[1],
5425 sp->def_mac_addr[0].mac_addr[2],
5426 sp->def_mac_addr[0].mac_addr[3],
5427 sp->def_mac_addr[0].mac_addr[4],
5428 sp->def_mac_addr[0].mac_addr[5]);
5429 }
5430
5431 /* Initialize device name */
5432 strcpy(sp->name, dev->name);
5433 if (sp->device_type & XFRAME_II_DEVICE)
5434 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5435 else
5436 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
7ba013ac 5437
5438 /* Initialize bimodal Interrupts */
5439 sp->config.bimodal = bimodal;
5440 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5441 sp->config.bimodal = 0;
5442 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5443 dev->name);
5444 }
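	/*
	 * Editorial note: only sp->config.bimodal is recorded here; the actual
	 * programming of the bimodal (timer-assisted) interrupt scheme
	 * presumably happens later when the interface is brought up, and only
	 * Xframe II hardware honours the request, as logged above.
	 */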
5445
5446 /*
5447 * Make Link state as off at this point, when the Link change
5448 * interrupt comes the state will be automatically changed to
5449 * the right state.
5450 */
5451 netif_carrier_off(dev);
5452
5453 return 0;
5454
5455 register_failed:
5456 set_swap_failed:
5457 iounmap(sp->bar1);
5458 bar1_remap_failed:
5459 iounmap(sp->bar0);
5460 bar0_remap_failed:
5461 mem_alloc_failed:
5462 free_shared_mem(sp);
5463 pci_disable_device(pdev);
5464 pci_release_regions(pdev);
5465 pci_set_drvdata(pdev, NULL);
5466 free_netdev(dev);
5467
5468 return ret;
5469}
5470
5471/**
20346722 5472 * s2io_rem_nic - Free the PCI device
1da177e4 5473 * @pdev: structure containing the PCI related information of the device.
20346722 5474 * Description: This function is called by the PCI subsystem to release a
1da177e4 5475 * PCI device and free all resources held by the device. This could
20346722 5476 * be in response to a hot-plug event or when the driver is to be removed
 5477 * from memory.
5478 */
5479
5480static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5481{
5482 struct net_device *dev =
5483 (struct net_device *) pci_get_drvdata(pdev);
5484 nic_t *sp;
5485
5486 if (dev == NULL) {
5487 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5488 return;
5489 }
5490
5491 sp = dev->priv;
5492 unregister_netdev(dev);
5493
5494 free_shared_mem(sp);
5495 iounmap(sp->bar0);
5496 iounmap(sp->bar1);
5497 pci_disable_device(pdev);
5498 pci_release_regions(pdev);
5499 pci_set_drvdata(pdev, NULL);
5500 free_netdev(dev);
5501}
5502
5503/**
5504 * s2io_starter - Entry point for the driver
5505 * Description: This function is the entry point for the driver. It verifies
5506 * the module loadable parameters and initializes PCI configuration space.
5507 */
5508
5509int __init s2io_starter(void)
5510{
5511 return pci_module_init(&s2io_driver);
5512}
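/*
 * Editorial note: pci_module_init() registers s2io_driver with the PCI core,
 * which then invokes the driver's probe routine (s2io_init_nic() above) for
 * each matching adapter it finds; module_exit() unwinds this through
 * s2io_closer() below.
 */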
5513
5514/**
20346722 5515 * s2io_closer - Cleanup routine for the driver
5516 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5517 */
5518
20346722 5519void s2io_closer(void)
5520{
5521 pci_unregister_driver(&s2io_driver);
5522 DBG_PRINT(INIT_DBG, "cleanup done\n");
5523}
5524
5525module_init(s2io_starter);
5526module_exit(s2io_closer);