[PATCH] irq-flags: drivers/net: Use the new IRQF_ constants

drivers/net/s2io.c
/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values are '1' for enable and '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 ************************************************************************/
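/*
 * Illustrative usage sketch (not part of the original source; the values
 * below are hypothetical examples built from the parameters documented
 * above):
 *
 *	modprobe s2io rx_ring_num=4 tx_fifo_num=2 intr_type=2 lro=1 lro_max_pkts=32
 */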

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.14.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};

static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ? \
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
{
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	/* Fewer buffers left than one full block: critical. */
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	/* More than 16 buffers short of the ring's capacity: running low. */
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}
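/*
 * Worked example for rx_buffer_level() (illustrative values): in 1-buffer
 * mode rxd_count[] is 127, so rxb_size == 100 returns PANIC; with a ring
 * capacity (pkt_cnt) of 896 and rxb_size == 500 the deficit is 396 > 16,
 * so it returns LOW.
 */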

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long) arg;		\
	mod_timer(&timer, (jiffies + exp))

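/*
 * Hypothetical usage sketch (the identifiers are illustrative, not taken
 * from this file): arm "timer" so that alarm_handler(sp) runs roughly half
 * a second from now.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, alarm_handler, sp, (HZ / 2));
 */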
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
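/*
 * Note (assumption, based on how such VLAN hooks were normally wired up in
 * this kernel era; the registration itself is outside this excerpt):
 *
 *	dev->vlan_rx_register = s2io_vlan_rx_register;
 *	dev->vlan_rx_kill_vid = s2io_vlan_rx_kill_vid;
 */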

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0};
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
static unsigned int l3l4hdr_size = 128;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
/* Large receive offload feature */
static unsigned int lro = 0;
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
static unsigned int lro_max_pkts = 0xFFFF;

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};
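/*
 * Sketch of how this driver structure is typically registered at module
 * load time (the actual module init/exit code lives later in the file,
 * outside this excerpt):
 *
 *	pci_register_driver(&s2io_driver);
 */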

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
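/*
 * Worked example (illustrative values): TXD_MEM_PAGE_CNT is a ceiling
 * division, so a FIFO of 100 descriptor lists at 32 lists per page needs
 * (100 + 32 - 1) / 32 = 4 pages.
 */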

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	buffAdd_t *ba;

	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
			  __FUNCTION__);
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return FAILURE;
	}

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
		    mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
	else
		size = (size * (sizeof(RxD3_t)));
	rx_sz = size;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
				    rx_blocks->block_virt_addr +
				    (rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
				    rx_blocks->block_dma_addr +
				    (rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_virt_addr;
			tmp_p_addr =
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
			    kmalloc((sizeof(buffAdd_t *) * blk_cnt),
				    GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
				    kmalloc((sizeof(buffAdd_t) *
					     (rxd_count[nic->rxd_mode] + 1)),
					    GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate, then round the
					 * pointer up to the next aligned
					 * boundary; keep the original
					 * pointer for kfree().
					 */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}
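	/*
	 * Worked example of the align-up logic above (illustrative; assumes
	 * ALIGN_SIZE has the form 2^n - 1): with ALIGN_SIZE == 127, a buffer
	 * kmalloc'ed at 0x1005 becomes (0x1005 + 127) & ~127 == 0x1080, the
	 * next 128-byte boundary, while ba_0_org keeps the original pointer
	 * for kfree().
	 */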

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t) 0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			    block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			    block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					buffAdd_t *ba =
					    &mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)) {
			if (tdev->bus == s2io_pdev->bus->parent)
				return 1;
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating an equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			/* Ring 0 also absorbs the remainder of the division */
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
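	/*
	 * Worked example (illustrative): on Xframe II (mem_size == 32) with
	 * rx_ring_num == 3, ring 0 gets 32/3 + 32%3 == 12 units and rings 1
	 * and 2 get 10 each, so all 32 units are distributed.
	 */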

	/*
	 * Filling the Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}
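	/*
	 * Decoding note (our reading of the register layout, inferred from
	 * the patterns above and not stated in the source): each byte of a
	 * tx_w_round_robin register appears to name the FIFO serviced in one
	 * arbitration slot, so the five registers describe a 40-slot
	 * schedule; for two FIFOs, bytes 0x00 and 0x01 are interleaved
	 * across those slots.
	 */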

	/* Enable Tx FIFO partition 0. */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register for those values, or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}
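	/*
	 * Example (arithmetic from the default rts frame length above): with
	 * the standard MTU of 1500, dev->mtu + 22 programs 1522; the 22-byte
	 * overhead reads as the 14-byte Ethernet header plus a 4-byte VLAN
	 * tag and 4-byte FCS (that breakdown is our interpretation, not
	 * stated in the source).
	 */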

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. The default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125) / 2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {

		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete;
	 * if it's not complete by then we return an error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {

		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate approximately 500
			 * interrupts per second
			 */
			int count = (nic->config.bus_speed * 125) / 4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2);
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
				  RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
				  RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
			    | RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop the Read request if the number of read splits has
	 * exceeded the limit pointed to by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13) | BIT(14) | BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(nic_t *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
1da177e4 1625
20346722
K
1626/**
1627 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
1628 * @nic: device private variable,
1629 * @mask: A mask indicating which Intr block must be modified and,
1630 * @flag: A flag indicating whether to enable or disable the Intrs.
1631 * Description: This function will either disable or enable the interrupts
20346722
K
1632 * depending on the flag argument. The mask argument can be used to
1633 * enable/disable any Intr block.
1da177e4
LT
1634 * Return Value: NONE.
1635 */
1636
1637static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1638{
1639 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1640 register u64 val64 = 0, temp64 = 0;
1641
1642 /* Top level interrupt classification */
1643 /* PIC Interrupts */
1644 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1645 /* Enable PIC Intrs in the general intr mask register */
1646 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1647 if (flag == ENABLE_INTRS) {
1648 temp64 = readq(&bar0->general_int_mask);
1649 temp64 &= ~((u64) val64);
1650 writeq(temp64, &bar0->general_int_mask);
20346722 1651 /*
a371a07d
K
1652 * If Hercules adapter enable GPIO otherwise
1653 * disabled all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
1654 * interrupts for now.
1655 * TODO
1da177e4 1656 */
a371a07d
K
1657 if (s2io_link_fault_indication(nic) ==
1658 LINK_UP_DOWN_INTERRUPT ) {
1659 temp64 = readq(&bar0->pic_int_mask);
1660 temp64 &= ~((u64) PIC_INT_GPIO);
1661 writeq(temp64, &bar0->pic_int_mask);
1662 temp64 = readq(&bar0->gpio_int_mask);
1663 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1664 writeq(temp64, &bar0->gpio_int_mask);
1665 } else {
1666 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1667 }
20346722 1668 /*
1da177e4
LT
1669 * No MSI Support is available presently, so TTI and
1670 * RTI interrupts are also disabled.
1671 */
1672 } else if (flag == DISABLE_INTRS) {
20346722
K
1673 /*
1674 * Disable PIC Intrs in the general
1675 * intr mask register
1da177e4
LT
1676 */
1677 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1678 temp64 = readq(&bar0->general_int_mask);
1679 val64 |= temp64;
1680 writeq(val64, &bar0->general_int_mask);
1681 }
1682 }
1683
1684 /* DMA Interrupts */
1685 /* Enabling/Disabling Tx DMA interrupts */
1686 if (mask & TX_DMA_INTR) {
1687 /* Enable TxDMA Intrs in the general intr mask register */
1688 val64 = TXDMA_INT_M;
1689 if (flag == ENABLE_INTRS) {
1690 temp64 = readq(&bar0->general_int_mask);
1691 temp64 &= ~((u64) val64);
1692 writeq(temp64, &bar0->general_int_mask);
20346722
K
1693 /*
1694 * Keep all interrupts other than PFC interrupt
1da177e4
LT
1695 * and PCC interrupt disabled in DMA level.
1696 */
1697 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1698 TXDMA_PCC_INT_M);
1699 writeq(val64, &bar0->txdma_int_mask);
20346722
K
1700 /*
1701 * Enable only the MISC error 1 interrupt in PFC block
1da177e4
LT
1702 */
1703 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1704 writeq(val64, &bar0->pfc_err_mask);
20346722
K
1705 /*
1706 * Enable only the FB_ECC error interrupt in PCC block
1da177e4
LT
1707 */
1708 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1709 writeq(val64, &bar0->pcc_err_mask);
1710 } else if (flag == DISABLE_INTRS) {
20346722
K
1711 /*
1712 * Disable TxDMA Intrs in the general intr mask
1713 * register
1da177e4
LT
1714 */
1715 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1716 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1717 temp64 = readq(&bar0->general_int_mask);
1718 val64 |= temp64;
1719 writeq(val64, &bar0->general_int_mask);
1720 }
1721 }
1722
1723 /* Enabling/Disabling Rx DMA interrupts */
1724 if (mask & RX_DMA_INTR) {
1725 /* Enable RxDMA Intrs in the general intr mask register */
1726 val64 = RXDMA_INT_M;
1727 if (flag == ENABLE_INTRS) {
1728 temp64 = readq(&bar0->general_int_mask);
1729 temp64 &= ~((u64) val64);
1730 writeq(temp64, &bar0->general_int_mask);
20346722
K
1731 /*
1732 * All RxDMA block interrupts are disabled for now
1733 * TODO
1da177e4
LT
1734 */
1735 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1736 } else if (flag == DISABLE_INTRS) {
20346722
K
1737 /*
1738 * Disable RxDMA Intrs in the general intr mask
1739 * register
1da177e4
LT
1740 */
1741 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1742 temp64 = readq(&bar0->general_int_mask);
1743 val64 |= temp64;
1744 writeq(val64, &bar0->general_int_mask);
1745 }
1746 }
1747
1748 /* MAC Interrupts */
1749 /* Enabling/Disabling MAC interrupts */
1750 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1751 val64 = TXMAC_INT_M | RXMAC_INT_M;
1752 if (flag == ENABLE_INTRS) {
1753 temp64 = readq(&bar0->general_int_mask);
1754 temp64 &= ~((u64) val64);
1755 writeq(temp64, &bar0->general_int_mask);
20346722
K
1756 /*
1757 * All MAC block error interrupts are disabled for now
1da177e4
LT
1758 * TODO
1759 */
1da177e4 1760 } else if (flag == DISABLE_INTRS) {
20346722
K
1761 /*
1762 * Disable MAC Intrs in the general intr mask register
1da177e4
LT
1763 */
1764 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1765 writeq(DISABLE_ALL_INTRS,
1766 &bar0->mac_rmac_err_mask);
1767
1768 temp64 = readq(&bar0->general_int_mask);
1769 val64 |= temp64;
1770 writeq(val64, &bar0->general_int_mask);
1771 }
1772 }
1773
1774 /* XGXS Interrupts */
1775 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1776 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1777 if (flag == ENABLE_INTRS) {
1778 temp64 = readq(&bar0->general_int_mask);
1779 temp64 &= ~((u64) val64);
1780 writeq(temp64, &bar0->general_int_mask);
20346722 1781 /*
1da177e4 1782 * All XGXS block error interrupts are disabled for now
20346722 1783 * TODO
1da177e4
LT
1784 */
1785 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1786 } else if (flag == DISABLE_INTRS) {
20346722
K
1787 /*
1788 * Disable XGXS Intrs in the general intr mask register
1da177e4
LT
1789 */
1790 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1791 temp64 = readq(&bar0->general_int_mask);
1792 val64 |= temp64;
1793 writeq(val64, &bar0->general_int_mask);
1794 }
1795 }
1796
1797 /* Memory Controller(MC) interrupts */
1798 if (mask & MC_INTR) {
1799 val64 = MC_INT_M;
1800 if (flag == ENABLE_INTRS) {
1801 temp64 = readq(&bar0->general_int_mask);
1802 temp64 &= ~((u64) val64);
1803 writeq(temp64, &bar0->general_int_mask);
20346722 1804 /*
5e25b9dd 1805 * Enable all MC Intrs.
1da177e4 1806 */
5e25b9dd
K
1807 writeq(0x0, &bar0->mc_int_mask);
1808 writeq(0x0, &bar0->mc_err_mask);
1da177e4
LT
1809 } else if (flag == DISABLE_INTRS) {
1810 /*
1811 * Disable MC Intrs in the general intr mask register
1812 */
1813 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1814 temp64 = readq(&bar0->general_int_mask);
1815 val64 |= temp64;
1816 writeq(val64, &bar0->general_int_mask);
1817 }
1818 }
1819
1820
1821 /* Tx traffic interrupts */
1822 if (mask & TX_TRAFFIC_INTR) {
1823 val64 = TXTRAFFIC_INT_M;
1824 if (flag == ENABLE_INTRS) {
1825 temp64 = readq(&bar0->general_int_mask);
1826 temp64 &= ~((u64) val64);
1827 writeq(temp64, &bar0->general_int_mask);
20346722 1828 /*
1da177e4 1829 * Enable all the Tx side interrupts
20346722 1830 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
1831 */
1832 writeq(0x0, &bar0->tx_traffic_mask);
1833 } else if (flag == DISABLE_INTRS) {
20346722
K
1834 /*
1835 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
1836 * register.
1837 */
1838 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1839 temp64 = readq(&bar0->general_int_mask);
1840 val64 |= temp64;
1841 writeq(val64, &bar0->general_int_mask);
1842 }
1843 }
1844
1845 /* Rx traffic interrupts */
1846 if (mask & RX_TRAFFIC_INTR) {
1847 val64 = RXTRAFFIC_INT_M;
1848 if (flag == ENABLE_INTRS) {
1849 temp64 = readq(&bar0->general_int_mask);
1850 temp64 &= ~((u64) val64);
1851 writeq(temp64, &bar0->general_int_mask);
1852 /* writing 0 Enables all 8 RX interrupt levels */
1853 writeq(0x0, &bar0->rx_traffic_mask);
1854 } else if (flag == DISABLE_INTRS) {
20346722
K
1855 /*
1856 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
1857 * register.
1858 */
1859 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1860 temp64 = readq(&bar0->general_int_mask);
1861 val64 |= temp64;
1862 writeq(val64, &bar0->general_int_mask);
1863 }
1864 }
1865}
1866
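/*
 * Helper for verify_xena_quiescence(): checks the RMAC PCC idle and
 * RC PRC quiescent bits of the adapter status word. The rev_id/herc
 * split reflects that older Xena revisions (< 4) report PCC idle
 * through a different status bit (a reading of the code below, not
 * confirmed against the hardware spec).
 */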
541ae68f 1867static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
20346722
K
1868{
1869 int ret = 0;
1870
1871 if (flag == FALSE) {
541ae68f 1872 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1873 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1874 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1875 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1876 ret = 1;
1877 }
541ae68f 1878 } else {
5e25b9dd
K
1879 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1880 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1881 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1882 ret = 1;
1883 }
20346722
K
1884 }
1885 } else {
541ae68f 1886 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1887 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1888 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1889 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1890 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1891 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1892 ret = 1;
1893 }
1894 } else {
1895 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1896 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1897 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1898 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1899 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1900 ret = 1;
1901 }
20346722
K
1902 }
1903 }
1904
1905 return ret;
1906}
1907/**
1908 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4
LT
1909 * @val64 : Value read from adapter status register.
1910 * @flag : indicates if the adapter enable bit was ever written once
1911 * before.
1912 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1913 * on whether the adapter enable bit was written or not, the comparison
1da177e4
LT
1914 * differs and the calling function passes the input argument flag to
1915 * indicate this.
20346722 1916 * Return: 1 if Xena is quiescent
1da177e4
LT
1917 * 0 if Xena is not quiescent
1918 */
1919
20346722 1920static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1921{
541ae68f 1922 int ret = 0, herc;
1da177e4 1923 u64 tmp64 = ~((u64) val64);
5e25b9dd 1924 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1925
541ae68f 1926 herc = (sp->device_type == XFRAME_II_DEVICE);
1da177e4
LT
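/*
 * All of the READY/QUIESCENT bits tested below must be set in val64
 * (i.e. clear in tmp64) before the PRC/PCC state is even examined.
 */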
1927 if (!
1928 (tmp64 &
1929 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1930 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1931 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1932 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1933 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1934 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1da177e4
LT
1935 }
1936
1937 return ret;
1938}
1939
1940/**
1941 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1942 * @sp: Pointer to device specific structure
20346722 1943 * Description:
1da177e4
LT
1944 * New procedure to clear mac address reading problems on Alpha platforms
1945 *
1946 */
1947
ac1f60db 1948static void fix_mac_address(nic_t * sp)
1da177e4
LT
1949{
1950 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1951 u64 val64;
1952 int i = 0;
1953
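/*
 * Replay the fix_mac[] write sequence into gpio_control until the
 * END_SIGN sentinel; the read-back after each write presumably serves
 * to flush the posted PCI write.
 */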
1954 while (fix_mac[i] != END_SIGN) {
1955 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1956 udelay(10);
1da177e4
LT
1957 val64 = readq(&bar0->gpio_control);
1958 }
1959}
1960
1961/**
20346722 1962 * start_nic - Turns the device on
1da177e4 1963 * @nic : device private variable.
20346722
K
1964 * Description:
1965 * This function actually turns the device on. Before this function is
1966 * called, all registers are configured from their reset states
1967 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1968 * calling this function, the device interrupts are cleared and the NIC is
1969 * literally switched on by writing into the adapter control register.
20346722 1970 * Return Value:
1da177e4
LT
1971 * SUCCESS on success and -1 on failure.
1972 */
1973
1974static int start_nic(struct s2io_nic *nic)
1975{
1976 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1977 struct net_device *dev = nic->dev;
1978 register u64 val64 = 0;
20346722
K
1979 u16 interruptible;
1980 u16 subid, i;
1da177e4
LT
1981 mac_info_t *mac_control;
1982 struct config_param *config;
1983
1984 mac_control = &nic->mac_control;
1985 config = &nic->config;
1986
1987 /* PRC Initialization and configuration */
1988 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1989 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1990 &bar0->prc_rxd0_n[i]);
1991
1992 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982
K
1993 if (nic->config.bimodal)
1994 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
da6971d8
AR
1995 if (nic->rxd_mode == RXD_MODE_1)
1996 val64 |= PRC_CTRL_RC_ENABLED;
1997 else
1998 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
863c11a9
AR
1999 if (nic->device_type == XFRAME_II_DEVICE)
2000 val64 |= PRC_CTRL_GROUP_READS;
2001 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2002 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1da177e4
LT
2003 writeq(val64, &bar0->prc_ctrl_n[i]);
2004 }
2005
da6971d8
AR
2006 if (nic->rxd_mode == RXD_MODE_3B) {
2007 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2008 val64 = readq(&bar0->rx_pa_cfg);
2009 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2010 writeq(val64, &bar0->rx_pa_cfg);
2011 }
1da177e4 2012
20346722 2013 /*
1da177e4
LT
2014 * Enabling MC-RLDRAM. After enabling the device, we wait
2015 * for around 100ms, which is approximately the time required
2016 * for the device to be ready for operation.
2017 */
2018 val64 = readq(&bar0->mc_rldram_mrs);
2019 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2020 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2021 val64 = readq(&bar0->mc_rldram_mrs);
2022
20346722 2023 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
2024
2025 /* Enabling ECC Protection. */
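/* (ADAPTER_ECC_EN appears to be active-low here: the bit is cleared,
 * not set, to enable ECC protection - an assumption from the code.) */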
2026 val64 = readq(&bar0->adapter_control);
2027 val64 &= ~ADAPTER_ECC_EN;
2028 writeq(val64, &bar0->adapter_control);
2029
20346722
K
2030 /*
2031 * Clearing any possible Link state change interrupts that
1da177e4
LT
2032 * could have popped up just before Enabling the card.
2033 */
2034 val64 = readq(&bar0->mac_rmac_err_reg);
2035 if (val64)
2036 writeq(val64, &bar0->mac_rmac_err_reg);
2037
20346722
K
2038 /*
2039 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
2040 * it.
2041 */
2042 val64 = readq(&bar0->adapter_status);
20346722 2043 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
2044 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2045 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2046 (unsigned long long) val64);
2047 return FAILURE;
2048 }
2049
2050 /* Enable select interrupts */
cc6e7c44
RA
2051 if (nic->intr_type != INTA)
2052 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2053 else {
2054 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2055 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2056 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2057 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
2058 }
1da177e4 2059
20346722 2060 /*
1da177e4 2061 * With some switches, link might be already up at this point.
20346722
K
2062 * Because of this weird behavior, when we enable laser,
2063 * we may not get link. We need to handle this. We cannot
2064 * figure out which switch is misbehaving. So we are forced to
2065 * make a global change.
1da177e4
LT
2066 */
2067
2068 /* Enabling Laser. */
2069 val64 = readq(&bar0->adapter_control);
2070 val64 |= ADAPTER_EOI_TX_ON;
2071 writeq(val64, &bar0->adapter_control);
2072
c92ca04b
AR
2073 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2074 /*
2075 * Don't see link state interrupts initially on some switches,
2076 * so directly scheduling the link state task here.
2077 */
2078 schedule_work(&nic->set_link_task);
2079 }
1da177e4
LT
2080 /* SXE-002: Initialize link and activity LED */
2081 subid = nic->pdev->subsystem_device;
541ae68f
K
2082 if (((subid & 0xFF) >= 0x07) &&
2083 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2084 val64 = readq(&bar0->gpio_control);
2085 val64 |= 0x0000800000000000ULL;
2086 writeq(val64, &bar0->gpio_control);
2087 val64 = 0x0411040400000000ULL;
509a2671 2088 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2089 }
2090
1da177e4
LT
2091 return SUCCESS;
2092}
fed5eccd
AR
2093/**
2094 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2095 */
2096static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2097{
2098 nic_t *nic = fifo_data->nic;
2099 struct sk_buff *skb;
2100 TxD_t *txds;
2101 u16 j, frg_cnt;
2102
2103 txds = txdlp;
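/*
 * The first TxD may carry a UFO in-band control buffer rather than
 * frame data; if so, unmap it and step past it before looking for
 * the skb pointer.
 */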
26b7625c 2104 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
fed5eccd
AR
2105 pci_unmap_single(nic->pdev, (dma_addr_t)
2106 txds->Buffer_Pointer, sizeof(u64),
2107 PCI_DMA_TODEVICE);
2108 txds++;
2109 }
2110
2111 skb = (struct sk_buff *) ((unsigned long)
2112 txds->Host_Control);
2113 if (!skb) {
2114 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2115 return NULL;
2116 }
2117 pci_unmap_single(nic->pdev, (dma_addr_t)
2118 txds->Buffer_Pointer,
2119 skb->len - skb->data_len,
2120 PCI_DMA_TODEVICE);
2121 frg_cnt = skb_shinfo(skb)->nr_frags;
2122 if (frg_cnt) {
2123 txds++;
2124 for (j = 0; j < frg_cnt; j++, txds++) {
2125 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2126 if (!txds->Buffer_Pointer)
2127 break;
2128 pci_unmap_page(nic->pdev, (dma_addr_t)
2129 txds->Buffer_Pointer,
2130 frag->size, PCI_DMA_TODEVICE);
2131 }
2132 }
2133 txdlp->Host_Control = 0;
2134 return(skb);
2135}
1da177e4 2136
20346722
K
2137/**
2138 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2139 * @nic : device private variable.
20346722 2140 * Description:
1da177e4 2141 * Free all queued Tx buffers.
20346722 2142 * Return Value: void
1da177e4
LT
2143*/
2144
2145static void free_tx_buffers(struct s2io_nic *nic)
2146{
2147 struct net_device *dev = nic->dev;
2148 struct sk_buff *skb;
2149 TxD_t *txdp;
2150 int i, j;
2151 mac_info_t *mac_control;
2152 struct config_param *config;
fed5eccd 2153 int cnt = 0;
1da177e4
LT
2154
2155 mac_control = &nic->mac_control;
2156 config = &nic->config;
2157
2158 for (i = 0; i < config->tx_fifo_num; i++) {
2159 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 2160 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1da177e4 2161 list_virt_addr;
fed5eccd
AR
2162 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2163 if (skb) {
2164 dev_kfree_skb(skb);
2165 cnt++;
1da177e4 2166 }
1da177e4
LT
2167 }
2168 DBG_PRINT(INTR_DBG,
2169 "%s:forcibly freeing %d skbs on FIFO%d\n",
2170 dev->name, cnt, i);
20346722
K
2171 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2172 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2173 }
2174}
2175
20346722
K
2176/**
2177 * stop_nic - To stop the nic
1da177e4 2178 * @nic : device private variable.
20346722
K
2179 * Description:
2180 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2181 * function does. This function is called to stop the device.
2182 * Return Value:
2183 * void.
2184 */
2185
2186static void stop_nic(struct s2io_nic *nic)
2187{
2188 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2189 register u64 val64 = 0;
5d3213cc 2190 u16 interruptible;
1da177e4
LT
2191 mac_info_t *mac_control;
2192 struct config_param *config;
2193
2194 mac_control = &nic->mac_control;
2195 config = &nic->config;
2196
2197 /* Disable all interrupts */
e960fc5c 2198 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
2199 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2200 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2201 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2202
5d3213cc
AR
2203 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2204 val64 = readq(&bar0->adapter_control);
2205 val64 &= ~(ADAPTER_CNTL_EN);
2206 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2207}
2208
26df54bf 2209static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
da6971d8
AR
2210{
2211 struct net_device *dev = nic->dev;
2212 struct sk_buff *frag_list;
50eb8006 2213 void *tmp;
da6971d8
AR
2214
2215 /* Buffer-1 receives L3/L4 headers */
2216 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2217 (nic->pdev, skb->data, l3l4hdr_size + 4,
2218 PCI_DMA_FROMDEVICE);
2219
2220 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2221 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2222 if (skb_shinfo(skb)->frag_list == NULL) {
2223 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2224 return -ENOMEM ;
2225 }
2226 frag_list = skb_shinfo(skb)->frag_list;
2227 frag_list->next = NULL;
50eb8006
JG
2228 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2229 frag_list->data = tmp;
2230 frag_list->tail = tmp;
da6971d8
AR
2231
2232 /* Buffer-2 receives L4 data payload */
2233 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2234 frag_list->data, dev->mtu,
2235 PCI_DMA_FROMDEVICE);
2236 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2237 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2238
2239 return SUCCESS;
2240}
2241
20346722
K
2242/**
2243 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2244 * @nic: device private variable
20346722
K
2245 * @ring_no: ring number
2246 * Description:
1da177e4
LT
2247 * The function allocates Rx side skbs and puts the physical
2248 * address of these buffers into the RxD buffer pointers, so that the NIC
2249 * can DMA the received frame into these locations.
2250 * The NIC supports 3 receive modes, viz
2251 * 1. single buffer,
2252 * 2. three buffer and
2253 * 3. five buffer modes.
20346722
K
2254 * Each mode defines how many fragments the received frame will be split
2255 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2256 * L4 payload in three buffer mode; in five buffer mode the L4 payload
2257 * itself is split into 3 fragments. As of now only single buffer mode is
2258 * supported.
2259 * Return Value:
2260 * SUCCESS on success or an appropriate -ve value on failure.
2261 */
2262
ac1f60db 2263static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2264{
2265 struct net_device *dev = nic->dev;
2266 struct sk_buff *skb;
2267 RxD_t *rxdp;
2268 int off, off1, size, block_no, block_no1;
1da177e4 2269 u32 alloc_tab = 0;
20346722 2270 u32 alloc_cnt;
1da177e4
LT
2271 mac_info_t *mac_control;
2272 struct config_param *config;
20346722 2273 u64 tmp;
1da177e4 2274 buffAdd_t *ba;
1da177e4
LT
2275#ifndef CONFIG_S2IO_NAPI
2276 unsigned long flags;
2277#endif
303bcb4b 2278 RxD_t *first_rxdp = NULL;
1da177e4
LT
2279
2280 mac_control = &nic->mac_control;
2281 config = &nic->config;
20346722
K
2282 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2283 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4 2284
5d3213cc 2285 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
863c11a9 2286 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4 2287 while (alloc_tab < alloc_cnt) {
20346722 2288 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2289 block_index;
20346722 2290 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1da177e4 2291
da6971d8
AR
2292 rxdp = mac_control->rings[ring_no].
2293 rx_blocks[block_no].rxds[off].virt_addr;
2294
2295 if ((block_no == block_no1) && (off == off1) &&
2296 (rxdp->Host_Control)) {
2297 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2298 dev->name);
1da177e4
LT
2299 DBG_PRINT(INTR_DBG, " info equated\n");
2300 goto end;
2301 }
da6971d8 2302 if (off && (off == rxd_count[nic->rxd_mode])) {
20346722 2303 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2304 block_index++;
da6971d8
AR
2305 if (mac_control->rings[ring_no].rx_curr_put_info.
2306 block_index == mac_control->rings[ring_no].
2307 block_count)
2308 mac_control->rings[ring_no].rx_curr_put_info.
2309 block_index = 0;
2310 block_no = mac_control->rings[ring_no].
2311 rx_curr_put_info.block_index;
2312 if (off == rxd_count[nic->rxd_mode])
2313 off = 0;
20346722 2314 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8
AR
2315 offset = off;
2316 rxdp = mac_control->rings[ring_no].
2317 rx_blocks[block_no].block_virt_addr;
1da177e4
LT
2318 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2319 dev->name, rxdp);
2320 }
2321#ifndef CONFIG_S2IO_NAPI
2322 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2323 mac_control->rings[ring_no].put_pos =
da6971d8 2324 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
1da177e4
LT
2325 spin_unlock_irqrestore(&nic->put_lock, flags);
2326#endif
da6971d8
AR
2327 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2328 ((nic->rxd_mode >= RXD_MODE_3A) &&
2329 (rxdp->Control_2 & BIT(0)))) {
20346722 2330 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8 2331 offset = off;
1da177e4
LT
2332 goto end;
2333 }
da6971d8
AR
2334 /* calculate size of skb based on ring mode */
2335 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2336 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2337 if (nic->rxd_mode == RXD_MODE_1)
2338 size += NET_IP_ALIGN;
2339 else if (nic->rxd_mode == RXD_MODE_3B)
2340 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2341 else
2342 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2343
da6971d8
AR
2344 /* allocate skb */
2345 skb = dev_alloc_skb(size);
2346 if(!skb) {
1da177e4
LT
2347 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2348 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2349 if (first_rxdp) {
2350 wmb();
2351 first_rxdp->Control_1 |= RXD_OWN_XENA;
2352 }
da6971d8
AR
2353 return -ENOMEM ;
2354 }
2355 if (nic->rxd_mode == RXD_MODE_1) {
2356 /* 1 buffer mode - normal operation mode */
2357 memset(rxdp, 0, sizeof(RxD1_t));
2358 skb_reserve(skb, NET_IP_ALIGN);
2359 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
863c11a9
AR
2360 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2361 PCI_DMA_FROMDEVICE);
2362 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
da6971d8
AR
2363
2364 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2365 /*
2366 * 2 or 3 buffer mode -
2367 * Both 2 buffer mode and 3 buffer mode provides 128
2368 * byte aligned receive buffers.
2369 *
2370 * 3 buffer mode provides header separation where in
2371 * skb->data will have L3/L4 headers where as
2372 * skb_shinfo(skb)->frag_list will have the L4 data
2373 * payload
2374 */
2375
2376 memset(rxdp, 0, sizeof(RxD3_t));
2377 ba = &mac_control->rings[ring_no].ba[block_no][off];
2378 skb_reserve(skb, BUF0_LEN);
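/*
 * Round skb->data up to the next alignment boundary; this assumes
 * ALIGN_SIZE is of the form 2^n - 1 (e.g. 127 for the 128-byte
 * aligned buffers mentioned above).
 */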
2379 tmp = (u64)(unsigned long) skb->data;
2380 tmp += ALIGN_SIZE;
2381 tmp &= ~ALIGN_SIZE;
2382 skb->data = (void *) (unsigned long)tmp;
2383 skb->tail = (void *) (unsigned long)tmp;
2384
2385 ((RxD3_t*)rxdp)->Buffer0_ptr =
2386 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2387 PCI_DMA_FROMDEVICE);
2388 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2389 if (nic->rxd_mode == RXD_MODE_3B) {
2390 /* Two buffer mode */
2391
2392 /*
2393 * Buffer2 will have L3/L4 header plus
2394 * L4 payload
2395 */
2396 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2397 (nic->pdev, skb->data, dev->mtu + 4,
2398 PCI_DMA_FROMDEVICE);
2399
2400 /* Buffer-1 will be dummy buffer not used */
2401 ((RxD3_t*)rxdp)->Buffer1_ptr =
2402 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2403 PCI_DMA_FROMDEVICE);
2404 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2405 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2406 (dev->mtu + 4);
2407 } else {
2408 /* 3 buffer mode */
2409 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2410 dev_kfree_skb_irq(skb);
2411 if (first_rxdp) {
2412 wmb();
2413 first_rxdp->Control_1 |=
2414 RXD_OWN_XENA;
2415 }
2416 return -ENOMEM ;
2417 }
2418 }
2419 rxdp->Control_2 |= BIT(0);
1da177e4 2420 }
1da177e4 2421 rxdp->Host_Control = (unsigned long) (skb);
303bcb4b
K
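/*
 * Descriptors are handed to the NIC in batches of (1 << rxsync_frequency):
 * only the first RxD of a batch is left un-owned here, and it is flipped
 * to RXD_OWN_XENA after a wmb() further down.
 */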
2422 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2423 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2424 off++;
da6971d8
AR
2425 if (off == (rxd_count[nic->rxd_mode] + 1))
2426 off = 0;
20346722 2427 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
20346722 2428
da6971d8 2429 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b
K
2430 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2431 if (first_rxdp) {
2432 wmb();
2433 first_rxdp->Control_1 |= RXD_OWN_XENA;
2434 }
2435 first_rxdp = rxdp;
2436 }
1da177e4
LT
2437 atomic_inc(&nic->rx_bufs_left[ring_no]);
2438 alloc_tab++;
2439 }
2440
2441 end:
303bcb4b
K
2442 /* Transfer ownership of first descriptor to adapter just before
2443 * exiting. Before that, use memory barrier so that ownership
2444 * and other fields are seen by adapter correctly.
2445 */
2446 if (first_rxdp) {
2447 wmb();
2448 first_rxdp->Control_1 |= RXD_OWN_XENA;
2449 }
2450
1da177e4
LT
2451 return SUCCESS;
2452}
2453
da6971d8
AR
2454static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2455{
2456 struct net_device *dev = sp->dev;
2457 int j;
2458 struct sk_buff *skb;
2459 RxD_t *rxdp;
2460 mac_info_t *mac_control;
2461 buffAdd_t *ba;
2462
2463 mac_control = &sp->mac_control;
2464 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2465 rxdp = mac_control->rings[ring_no].
2466 rx_blocks[blk].rxds[j].virt_addr;
2467 skb = (struct sk_buff *)
2468 ((unsigned long) rxdp->Host_Control);
2469 if (!skb) {
2470 continue;
2471 }
2472 if (sp->rxd_mode == RXD_MODE_1) {
2473 pci_unmap_single(sp->pdev, (dma_addr_t)
2474 ((RxD1_t*)rxdp)->Buffer0_ptr,
2475 dev->mtu +
2476 HEADER_ETHERNET_II_802_3_SIZE
2477 + HEADER_802_2_SIZE +
2478 HEADER_SNAP_SIZE,
2479 PCI_DMA_FROMDEVICE);
2480 memset(rxdp, 0, sizeof(RxD1_t));
2481 } else if(sp->rxd_mode == RXD_MODE_3B) {
2482 ba = &mac_control->rings[ring_no].
2483 ba[blk][j];
2484 pci_unmap_single(sp->pdev, (dma_addr_t)
2485 ((RxD3_t*)rxdp)->Buffer0_ptr,
2486 BUF0_LEN,
2487 PCI_DMA_FROMDEVICE);
2488 pci_unmap_single(sp->pdev, (dma_addr_t)
2489 ((RxD3_t*)rxdp)->Buffer1_ptr,
2490 BUF1_LEN,
2491 PCI_DMA_FROMDEVICE);
2492 pci_unmap_single(sp->pdev, (dma_addr_t)
2493 ((RxD3_t*)rxdp)->Buffer2_ptr,
2494 dev->mtu + 4,
2495 PCI_DMA_FROMDEVICE);
2496 memset(rxdp, 0, sizeof(RxD3_t));
2497 } else {
2498 pci_unmap_single(sp->pdev, (dma_addr_t)
2499 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2500 PCI_DMA_FROMDEVICE);
2501 pci_unmap_single(sp->pdev, (dma_addr_t)
2502 ((RxD3_t*)rxdp)->Buffer1_ptr,
2503 l3l4hdr_size + 4,
2504 PCI_DMA_FROMDEVICE);
2505 pci_unmap_single(sp->pdev, (dma_addr_t)
2506 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2507 PCI_DMA_FROMDEVICE);
2508 memset(rxdp, 0, sizeof(RxD3_t));
2509 }
2510 dev_kfree_skb(skb);
2511 atomic_dec(&sp->rx_bufs_left[ring_no]);
2512 }
2513}
2514
1da177e4 2515/**
20346722 2516 * free_rx_buffers - Frees all Rx buffers
1da177e4 2517 * @sp: device private variable.
20346722 2518 * Description:
1da177e4
LT
2519 * This function will free all Rx buffers allocated by host.
2520 * Return Value:
2521 * NONE.
2522 */
2523
2524static void free_rx_buffers(struct s2io_nic *sp)
2525{
2526 struct net_device *dev = sp->dev;
da6971d8 2527 int i, blk = 0, buf_cnt = 0;
1da177e4
LT
2528 mac_info_t *mac_control;
2529 struct config_param *config;
1da177e4
LT
2530
2531 mac_control = &sp->mac_control;
2532 config = &sp->config;
2533
2534 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
2535 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2536 free_rxd_blk(sp,i,blk);
1da177e4 2537
20346722
K
2538 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2539 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2540 mac_control->rings[i].rx_curr_put_info.offset = 0;
2541 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2542 atomic_set(&sp->rx_bufs_left[i], 0);
2543 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2544 dev->name, buf_cnt, i);
2545 }
2546}
2547
2548/**
2549 * s2io_poll - Rx interrupt handler for NAPI support
2550 * @dev : pointer to the device structure.
20346722 2551 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2552 * during one pass through the 'Poll' function.
2553 * Description:
2554 * Comes into picture only if NAPI support has been incorporated. It does
2555 * the same thing that rx_intr_handler does, but not in an interrupt
2556 * context. Also, it will process only a given number of packets.
2557 * Return value:
2558 * 0 on success and 1 if there are No Rx packets to be processed.
2559 */
2560
20346722 2561#if defined(CONFIG_S2IO_NAPI)
1da177e4
LT
2562static int s2io_poll(struct net_device *dev, int *budget)
2563{
2564 nic_t *nic = dev->priv;
20346722 2565 int pkt_cnt = 0, org_pkts_to_process;
1da177e4
LT
2566 mac_info_t *mac_control;
2567 struct config_param *config;
509a2671 2568 XENA_dev_config_t __iomem *bar0 = nic->bar0;
863c11a9 2569 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
20346722 2570 int i;
1da177e4 2571
7ba013ac 2572 atomic_inc(&nic->isr_cnt);
1da177e4
LT
2573 mac_control = &nic->mac_control;
2574 config = &nic->config;
2575
20346722
K
2576 nic->pkts_to_process = *budget;
2577 if (nic->pkts_to_process > dev->quota)
2578 nic->pkts_to_process = dev->quota;
2579 org_pkts_to_process = nic->pkts_to_process;
1da177e4 2580
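/*
 * Acknowledge all pending Rx traffic interrupts (the register is
 * presumably write-1-to-clear); the readl that follows just flushes
 * the posted write.
 */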
1da177e4 2581 writeq(val64, &bar0->rx_traffic_int);
863c11a9 2582 val64 = readl(&bar0->rx_traffic_int);
1da177e4
LT
2583
2584 for (i = 0; i < config->rx_ring_num; i++) {
20346722
K
2585 rx_intr_handler(&mac_control->rings[i]);
2586 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2587 if (!nic->pkts_to_process) {
2588 /* Quota for the current iteration has been met */
2589 goto no_rx;
1da177e4 2590 }
1da177e4
LT
2591 }
2592 if (!pkt_cnt)
2593 pkt_cnt = 1;
2594
2595 dev->quota -= pkt_cnt;
2596 *budget -= pkt_cnt;
2597 netif_rx_complete(dev);
2598
2599 for (i = 0; i < config->rx_ring_num; i++) {
2600 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2601 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2602 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2603 break;
2604 }
2605 }
2606 /* Re enable the Rx interrupts. */
c92ca04b
AR
2607 writeq(0x0, &bar0->rx_traffic_mask);
2608 val64 = readl(&bar0->rx_traffic_mask);
7ba013ac 2609 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2610 return 0;
2611
20346722 2612no_rx:
1da177e4
LT
2613 dev->quota -= pkt_cnt;
2614 *budget -= pkt_cnt;
2615
2616 for (i = 0; i < config->rx_ring_num; i++) {
2617 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2618 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2619 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2620 break;
2621 }
2622 }
7ba013ac 2623 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2624 return 1;
2625}
20346722
K
2626#endif
2627
612eff0e
BH
2628/**
2629 * s2io_netpoll - Rx interrupt service handler for netpoll support
2630 * @dev : pointer to the device structure.
2631 * Description:
2632 * Polling 'interrupt' - used by things like netconsole to send skbs
2633 * without having to re-enable interrupts. It's not called while
2634 * the interrupt routine is executing.
2635 */
2636
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2638static void s2io_netpoll(struct net_device *dev)
2639{
2640 nic_t *nic = dev->priv;
2641 mac_info_t *mac_control;
2642 struct config_param *config;
2643 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2644 u64 val64;
2645 int i;
2646
2647 disable_irq(dev->irq);
2648
2649 atomic_inc(&nic->isr_cnt);
2650 mac_control = &nic->mac_control;
2651 config = &nic->config;
2652
2653 val64 = readq(&bar0->rx_traffic_int);
2654 writeq(val64, &bar0->rx_traffic_int);
2655
2656 for (i = 0; i < config->rx_ring_num; i++)
2657 rx_intr_handler(&mac_control->rings[i]);
2658
2659 for (i = 0; i < config->rx_ring_num; i++) {
2660 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2661 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2662 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2663 break;
2664 }
2665 }
2666 atomic_dec(&nic->isr_cnt);
2667 enable_irq(dev->irq);
2668 return;
2669}
2670#endif
2671
20346722 2672/**
1da177e4
LT
2673 * rx_intr_handler - Rx interrupt handler
2674 * @nic: device private variable.
20346722
K
2675 * Description:
2676 * If the interrupt is because of a received frame or if the
1da177e4 2677 * receive ring contains fresh, as yet unprocessed frames, this function is
20346722
K
2678 * called. It picks up from the RxD at which the last Rx processing
2679 * stopped, sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2680 * the offset.
2681 * Return Value:
2682 * NONE.
2683 */
20346722 2684static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2685{
20346722 2686 nic_t *nic = ring_data->nic;
1da177e4 2687 struct net_device *dev = (struct net_device *) nic->dev;
da6971d8 2688 int get_block, put_block, put_offset;
1da177e4
LT
2689 rx_curr_get_info_t get_info, put_info;
2690 RxD_t *rxdp;
2691 struct sk_buff *skb;
20346722
K
2692#ifndef CONFIG_S2IO_NAPI
2693 int pkt_cnt = 0;
1da177e4 2694#endif
7d3d0439
RA
2695 int i;
2696
7ba013ac
K
2697 spin_lock(&nic->rx_lock);
2698 if (atomic_read(&nic->card_state) == CARD_DOWN) {
776bd20f 2699 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
7ba013ac
K
2700 __FUNCTION__, dev->name);
2701 spin_unlock(&nic->rx_lock);
776bd20f 2702 return;
7ba013ac
K
2703 }
2704
20346722
K
2705 get_info = ring_data->rx_curr_get_info;
2706 get_block = get_info.block_index;
2707 put_info = ring_data->rx_curr_put_info;
2708 put_block = put_info.block_index;
da6971d8 2709 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
20346722
K
2710#ifndef CONFIG_S2IO_NAPI
2711 spin_lock(&nic->put_lock);
2712 put_offset = ring_data->put_pos;
2713 spin_unlock(&nic->put_lock);
2714#else
da6971d8 2715 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
20346722
K
2716 put_info.offset;
2717#endif
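/*
 * In the non-NAPI path put_pos is sampled under put_lock because
 * fill_rx_buffers() updates it concurrently; with NAPI the position
 * is recomputed from the put pointer instead.
 */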
da6971d8
AR
2718 while (RXD_IS_UP2DT(rxdp)) {
2719 /* If we are right behind the put index, the ring is full */
2720 if ((get_block == put_block) &&
2721 (get_info.offset + 1) == put_info.offset) {
2722 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2723 break;
2724 }
20346722
K
2725 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2726 if (skb == NULL) {
2727 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2728 dev->name);
2729 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2730 spin_unlock(&nic->rx_lock);
20346722 2731 return;
1da177e4 2732 }
da6971d8
AR
2733 if (nic->rxd_mode == RXD_MODE_1) {
2734 pci_unmap_single(nic->pdev, (dma_addr_t)
2735 ((RxD1_t*)rxdp)->Buffer0_ptr,
20346722
K
2736 dev->mtu +
2737 HEADER_ETHERNET_II_802_3_SIZE +
2738 HEADER_802_2_SIZE +
2739 HEADER_SNAP_SIZE,
2740 PCI_DMA_FROMDEVICE);
da6971d8
AR
2741 } else if (nic->rxd_mode == RXD_MODE_3B) {
2742 pci_unmap_single(nic->pdev, (dma_addr_t)
2743 ((RxD3_t*)rxdp)->Buffer0_ptr,
20346722 2744 BUF0_LEN, PCI_DMA_FROMDEVICE);
da6971d8
AR
2745 pci_unmap_single(nic->pdev, (dma_addr_t)
2746 ((RxD3_t*)rxdp)->Buffer1_ptr,
20346722 2747 BUF1_LEN, PCI_DMA_FROMDEVICE);
da6971d8
AR
2748 pci_unmap_single(nic->pdev, (dma_addr_t)
2749 ((RxD3_t*)rxdp)->Buffer2_ptr,
2750 dev->mtu + 4,
20346722 2751 PCI_DMA_FROMDEVICE);
da6971d8
AR
2752 } else {
2753 pci_unmap_single(nic->pdev, (dma_addr_t)
2754 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2755 PCI_DMA_FROMDEVICE);
2756 pci_unmap_single(nic->pdev, (dma_addr_t)
2757 ((RxD3_t*)rxdp)->Buffer1_ptr,
2758 l3l4hdr_size + 4,
2759 PCI_DMA_FROMDEVICE);
2760 pci_unmap_single(nic->pdev, (dma_addr_t)
2761 ((RxD3_t*)rxdp)->Buffer2_ptr,
2762 dev->mtu, PCI_DMA_FROMDEVICE);
2763 }
863c11a9 2764 prefetch(skb->data);
20346722
K
2765 rx_osm_handler(ring_data, rxdp);
2766 get_info.offset++;
da6971d8
AR
2767 ring_data->rx_curr_get_info.offset = get_info.offset;
2768 rxdp = ring_data->rx_blocks[get_block].
2769 rxds[get_info.offset].virt_addr;
2770 if (get_info.offset == rxd_count[nic->rxd_mode]) {
20346722 2771 get_info.offset = 0;
da6971d8 2772 ring_data->rx_curr_get_info.offset = get_info.offset;
20346722 2773 get_block++;
da6971d8
AR
2774 if (get_block == ring_data->block_count)
2775 get_block = 0;
2776 ring_data->rx_curr_get_info.block_index = get_block;
20346722
K
2777 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2778 }
1da177e4 2779
20346722
K
2780#ifdef CONFIG_S2IO_NAPI
2781 nic->pkts_to_process -= 1;
2782 if (!nic->pkts_to_process)
2783 break;
2784#else
2785 pkt_cnt++;
1da177e4
LT
2786 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2787 break;
20346722 2788#endif
1da177e4 2789 }
7d3d0439
RA
2790 if (nic->lro) {
2791 /* Clear all LRO sessions before exiting */
2792 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2793 lro_t *lro = &nic->lro0_n[i];
2794 if (lro->in_use) {
2795 update_L3L4_header(nic, lro);
2796 queue_rx_frame(lro->parent);
2797 clear_lro_session(lro);
2798 }
2799 }
2800 }
2801
7ba013ac 2802 spin_unlock(&nic->rx_lock);
1da177e4 2803}
20346722
K
2804
2805/**
1da177e4
LT
2806 * tx_intr_handler - Transmit interrupt handler
2807 * @nic : device private variable
20346722
K
2808 * Description:
2809 * If an interrupt was raised to indicate DMA complete of the
2810 * Tx packet, this function is called. It identifies the last TxD
2811 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
2812 * DMA'ed into the NICs internal memory.
2813 * Return Value:
2814 * NONE
2815 */
2816
20346722 2817static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2818{
20346722 2819 nic_t *nic = fifo_data->nic;
1da177e4
LT
2820 struct net_device *dev = (struct net_device *) nic->dev;
2821 tx_curr_get_info_t get_info, put_info;
2822 struct sk_buff *skb;
2823 TxD_t *txdlp;
1da177e4 2824
20346722
K
2825 get_info = fifo_data->tx_curr_get_info;
2826 put_info = fifo_data->tx_curr_put_info;
2827 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2828 list_virt_addr;
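/*
 * Walk the FIFO from the last-freed TxD until we reach a descriptor
 * the NIC still owns, the put pointer, or an empty slot.
 */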
2829 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2830 (get_info.offset != put_info.offset) &&
2831 (txdlp->Host_Control)) {
2832 /* Check for TxD errors */
2833 if (txdlp->Control_1 & TXD_T_CODE) {
2834 unsigned long long err;
2835 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0
AR
2836 if (err & 0x1) {
2837 nic->mac_control.stats_info->sw_stat.
2838 parity_err_cnt++;
2839 }
776bd20f 2840 if ((err >> 48) == 0xA) {
2841 DBG_PRINT(TX_DBG, "TxD returned due \
cc6e7c44 2842to loss of link\n");
776bd20f 2843 } else {
2845 DBG_PRINT(ERR_DBG, "***TxD error \
cc6e7c44 2846%llx\n", err);
776bd20f 2847 }
20346722 2848 }
1da177e4 2849
fed5eccd 2850 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722
K
2851 if (skb == NULL) {
2852 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2853 __FUNCTION__);
2854 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2855 return;
2856 }
2857
20346722 2858 /* Updating the statistics block */
20346722
K
2859 nic->stats.tx_bytes += skb->len;
2860 dev_kfree_skb_irq(skb);
2861
2862 get_info.offset++;
863c11a9
AR
2863 if (get_info.offset == get_info.fifo_len + 1)
2864 get_info.offset = 0;
20346722
K
2865 txdlp = (TxD_t *) fifo_data->list_info
2866 [get_info.offset].list_virt_addr;
2867 fifo_data->tx_curr_get_info.offset =
2868 get_info.offset;
1da177e4
LT
2869 }
2870
2871 spin_lock(&nic->tx_lock);
2872 if (netif_queue_stopped(dev))
2873 netif_wake_queue(dev);
2874 spin_unlock(&nic->tx_lock);
2875}
2876
bd1034f0
AR
2877/**
2878 * s2io_mdio_write - Function to write into the MDIO registers
2879 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2880 * @addr : address value
2881 * @value : data value
2882 * @dev : pointer to net_device structure
2883 * Description:
2884 * This function is used to write values to the MDIO registers.
2885 * Return value: NONE
2886 */
2887static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2888{
2889 u64 val64 = 0x0;
2890 nic_t *sp = dev->priv;
2891 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2892
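/*
 * The access sequence below looks like a clause-45 style MDIO cycle:
 * latch the register address first, then issue the data transaction,
 * then read back (an assumption from the register names; the hardware
 * spec has not been checked).
 */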
2893 /* address transaction */
2894 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2895 | MDIO_MMD_DEV_ADDR(mmd_type)
2896 | MDIO_MMS_PRT_ADDR(0x0);
2897 writeq(val64, &bar0->mdio_control);
2898 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2899 writeq(val64, &bar0->mdio_control);
2900 udelay(100);
2901
2902 /* data transaction */
2903 val64 = 0x0;
2904 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2905 | MDIO_MMD_DEV_ADDR(mmd_type)
2906 | MDIO_MMS_PRT_ADDR(0x0)
2907 | MDIO_MDIO_DATA(value)
2908 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2909 writeq(val64, &bar0->mdio_control);
2910 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2911 writeq(val64, &bar0->mdio_control);
2912 udelay(100);
2913
2914 val64 = 0x0;
2915 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2916 | MDIO_MMD_DEV_ADDR(mmd_type)
2917 | MDIO_MMS_PRT_ADDR(0x0)
2918 | MDIO_OP(MDIO_OP_READ_TRANS);
2919 writeq(val64, &bar0->mdio_control);
2920 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2921 writeq(val64, &bar0->mdio_control);
2922 udelay(100);
2923
2924}
2925
2926/**
2927 * s2io_mdio_read - Function to read from the MDIO registers
2928 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2929 * @addr : address value
2930 * @dev : pointer to net_device structure
2931 * Description:
2932 * This function is used to read values from the MDIO registers.
2933 * Return value: NONE
2934 */
2935static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2936{
2937 u64 val64 = 0x0;
2938 u64 rval64 = 0x0;
2939 nic_t *sp = dev->priv;
2940 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2941
2942 /* address transaction */
2943 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2944 | MDIO_MMD_DEV_ADDR(mmd_type)
2945 | MDIO_MMS_PRT_ADDR(0x0);
2946 writeq(val64, &bar0->mdio_control);
2947 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2948 writeq(val64, &bar0->mdio_control);
2949 udelay(100);
2950
2951 /* Data transaction */
2952 val64 = 0x0;
2953 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2954 | MDIO_MMD_DEV_ADDR(mmd_type)
2955 | MDIO_MMS_PRT_ADDR(0x0)
2956 | MDIO_OP(MDIO_OP_READ_TRANS);
2957 writeq(val64, &bar0->mdio_control);
2958 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2959 writeq(val64, &bar0->mdio_control);
2960 udelay(100);
2961
2962 /* Read the value from regs */
2963 rval64 = readq(&bar0->mdio_control);
2964 rval64 = rval64 & 0xFFFF0000;
2965 rval64 = rval64 >> 16;
2966 return rval64;
2967}
2968/**
2969 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2970 * @counter : counter value to be updated
2971 * @flag : flag to indicate the status
2972 * @type : counter type
2973 * Description:
2974 * This function checks the status of the XPAK counter values.
2975 * Return value: NONE
2976 */
2977
2978static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2979{
2980 u64 mask = 0x3;
2981 u64 val64;
2982 int i;
2983 for(i = 0; i <index; i++)
2984 mask = mask << 0x2;
2985
2986 if(flag > 0)
2987 {
2988 *counter = *counter + 1;
2989 val64 = *regs_stat & mask;
2990 val64 = val64 >> (index * 0x2);
2991 val64 = val64 + 1;
2992 if(val64 == 3)
2993 {
2994 switch(type)
2995 {
2996 case 1:
2997 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2998 "service. Excessive temperatures may "
2999 "result in premature transceiver "
3000 "failure \n");
3001 break;
3002 case 2:
3003 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3004 "service Excessive bias currents may "
3005 "indicate imminent laser diode "
3006 "failure \n");
3007 break;
3008 case 3:
3009 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3010 "service Excessive laser output "
3011 "power may saturate far-end "
3012 "receiver\n");
3013 break;
3014 default:
3015 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3016 "type \n");
3017 }
3018 val64 = 0x0;
3019 }
3020 val64 = val64 << (index * 0x2);
3021 *regs_stat = (*regs_stat & (~mask)) | (val64);
3022
3023 } else {
3024 *regs_stat = *regs_stat & (~mask);
3025 }
3026}
3027
3028/**
3029 * s2io_updt_xpak_counter - Function to update the xpak counters
3030 * @dev : pointer to net_device struct
3031 * Description:
3032 * This function updates the status of the XPAK counter values.
3033 * Return value: NONE
3034 */
3035static void s2io_updt_xpak_counter(struct net_device *dev)
3036{
3037 u16 flag = 0x0;
3038 u16 type = 0x0;
3039 u16 val16 = 0x0;
3040 u64 val64 = 0x0;
3041 u64 addr = 0x0;
3042
3043 nic_t *sp = dev->priv;
3044 StatInfo_t *stat_info = sp->mac_control.stats_info;
3045
3046 /* Check the communication with the MDIO slave */
3047 addr = 0x0000;
3048 val64 = 0x0;
3049 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3050 if((val64 == 0xFFFF) || (val64 == 0x0000))
3051 {
3052 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3053 "Returned %llx\n", (unsigned long long)val64);
3054 return;
3055 }
3056
3057 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3058 if(val64 != 0x2040)
3059 {
3060 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3061 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3062 (unsigned long long)val64);
3063 return;
3064 }
3065
3066 /* Loading the DOM register to MDIO register */
3067 addr = 0xA100;
3068 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3069 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3070
3071 /* Reading the Alarm flags */
3072 addr = 0xA070;
3073 val64 = 0x0;
3074 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3075
3076 flag = CHECKBIT(val64, 0x7);
3077 type = 1;
3078 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3079 &stat_info->xpak_stat.xpak_regs_stat,
3080 0x0, flag, type);
3081
3082 if(CHECKBIT(val64, 0x6))
3083 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3084
3085 flag = CHECKBIT(val64, 0x3);
3086 type = 2;
3087 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3088 &stat_info->xpak_stat.xpak_regs_stat,
3089 0x2, flag, type);
3090
3091 if(CHECKBIT(val64, 0x2))
3092 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3093
3094 flag = CHECKBIT(val64, 0x1);
3095 type = 3;
3096 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3097 &stat_info->xpak_stat.xpak_regs_stat,
3098 0x4, flag, type);
3099
3100 if(CHECKBIT(val64, 0x0))
3101 stat_info->xpak_stat.alarm_laser_output_power_low++;
3102
3103 /* Reading the Warning flags */
3104 addr = 0xA074;
3105 val64 = 0x0;
3106 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3107
3108 if(CHECKBIT(val64, 0x7))
3109 stat_info->xpak_stat.warn_transceiver_temp_high++;
3110
3111 if(CHECKBIT(val64, 0x6))
3112 stat_info->xpak_stat.warn_transceiver_temp_low++;
3113
3114 if(CHECKBIT(val64, 0x3))
3115 stat_info->xpak_stat.warn_laser_bias_current_high++;
3116
3117 if(CHECKBIT(val64, 0x2))
3118 stat_info->xpak_stat.warn_laser_bias_current_low++;
3119
3120 if(CHECKBIT(val64, 0x1))
3121 stat_info->xpak_stat.warn_laser_output_power_high++;
3122
3123 if(CHECKBIT(val64, 0x0))
3124 stat_info->xpak_stat.warn_laser_output_power_low++;
3125}
3126
20346722 3127/**
1da177e4
LT
3128 * alarm_intr_handler - Alarm Interrupt handler
3129 * @nic: device private variable
20346722 3130 * Description: If the interrupt was neither due to an Rx packet nor a Tx
1da177e4 3131 * completion, this function is called. If the interrupt was to indicate
20346722
K
3132 * a loss of link, the OSM link status handler is invoked; for any other
3133 * alarm interrupt, the block that raised the interrupt is displayed
1da177e4
LT
3134 * and a H/W reset is issued.
3135 * Return Value:
3136 * NONE
3137*/
3138
3139static void alarm_intr_handler(struct s2io_nic *nic)
3140{
3141 struct net_device *dev = (struct net_device *) nic->dev;
3142 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3143 register u64 val64 = 0, err_reg = 0;
bd1034f0
AR
3144 u64 cnt;
3145 int i;
3146 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3147 /* Handling the XPAK counters update */
3148 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3149 /* waiting for an hour */
3150 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3151 } else {
3152 s2io_updt_xpak_counter(dev);
3153 /* reset the count to zero */
3154 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3155 }
1da177e4
LT
3156
3157 /* Handling link status change error Intr */
a371a07d
K
3158 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3159 err_reg = readq(&bar0->mac_rmac_err_reg);
3160 writeq(err_reg, &bar0->mac_rmac_err_reg);
3161 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3162 schedule_work(&nic->set_link_task);
3163 }
1da177e4
LT
3164 }
3165
5e25b9dd
K
3166 /* Handling Ecc errors */
3167 val64 = readq(&bar0->mc_err_reg);
3168 writeq(val64, &bar0->mc_err_reg);
3169 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3170 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
7ba013ac
K
3171 nic->mac_control.stats_info->sw_stat.
3172 double_ecc_errs++;
776bd20f 3173 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
5e25b9dd 3174 dev->name);
776bd20f 3175 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
e960fc5c 3176 if (nic->device_type != XFRAME_II_DEVICE) {
776bd20f 3177 /* Reset XframeI only if critical error */
3178 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3179 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3180 netif_stop_queue(dev);
3181 schedule_work(&nic->rst_timer_task);
bd1034f0
AR
3182 nic->mac_control.stats_info->sw_stat.
3183 soft_reset_cnt++;
776bd20f 3184 }
e960fc5c 3185 }
5e25b9dd 3186 } else {
7ba013ac
K
3187 nic->mac_control.stats_info->sw_stat.
3188 single_ecc_errs++;
5e25b9dd
K
3189 }
3190 }
3191
1da177e4
LT
3192 /* In case of a serious error, the device will be Reset. */
3193 val64 = readq(&bar0->serr_source);
3194 if (val64 & SERR_SOURCE_ANY) {
bd1034f0 3195 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
1da177e4 3196 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
776bd20f 3197 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3198 (unsigned long long)val64);
1da177e4
LT
3199 netif_stop_queue(dev);
3200 schedule_work(&nic->rst_timer_task);
bd1034f0 3201 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
3202 }
3203
3204 /*
3205 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3206 * Error occurs, the adapter will be recycled by disabling the
20346722 3207 * adapter enable bit and enabling it again after the device
1da177e4
LT
3208 * becomes Quiescent.
3209 */
3210 val64 = readq(&bar0->pcc_err_reg);
3211 writeq(val64, &bar0->pcc_err_reg);
3212 if (val64 & PCC_FB_ECC_DB_ERR) {
3213 u64 ac = readq(&bar0->adapter_control);
3214 ac &= ~(ADAPTER_CNTL_EN);
3215 writeq(ac, &bar0->adapter_control);
3216 ac = readq(&bar0->adapter_control);
3217 schedule_work(&nic->set_link_task);
3218 }
bd1034f0
AR
3219 /* Check for data parity error */
3220 val64 = readq(&bar0->pic_int_status);
3221 if (val64 & PIC_INT_GPIO) {
3222 val64 = readq(&bar0->gpio_int_reg);
3223 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3224 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3225 schedule_work(&nic->rst_timer_task);
3226 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3227 }
3228 }
3229
3230 /* Check for ring full counter */
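/*
 * Each 64-bit ring bump counter packs four 16-bit per-ring counts;
 * extract each field and accumulate it into ring_full_cnt.
 */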
3231 if (nic->device_type & XFRAME_II_DEVICE) {
3232 val64 = readq(&bar0->ring_bump_counter1);
3233 for (i=0; i<4; i++) {
3234 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3235 cnt >>= 64 - ((i+1)*16);
3236 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3237 += cnt;
3238 }
3239
3240 val64 = readq(&bar0->ring_bump_counter2);
3241 for (i=0; i<4; i++) {
3242 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3243 cnt >>= 64 - ((i+1)*16);
3244 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3245 += cnt;
3246 }
3247 }
1da177e4
LT
3248
3249 /* Other type of interrupts are not being handled now, TODO */
3250}
3251
20346722 3252/**
1da177e4 3253 * wait_for_cmd_complete - waits for a command to complete.
20346722 3254 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3255 * s2io_nic structure.
20346722
K
3256 * Description: Function that waits for a command written into the RMAC
3257 * ADDR/DATA registers to complete, and returns either success or
3258 * error depending on whether the command completed or not.
1da177e4
LT
3259 * Return value:
3260 * SUCCESS on success and FAILURE on failure.
3261 */
3262
c92ca04b 3263static int wait_for_cmd_complete(void *addr, u64 busy_bit)
1da177e4 3264{
1da177e4
LT
3265 int ret = FAILURE, cnt = 0;
3266 u64 val64;
3267
3268 while (TRUE) {
c92ca04b
AR
3269 val64 = readq(addr);
3270 if (!(val64 & busy_bit)) {
1da177e4
LT
3271 ret = SUCCESS;
3272 break;
3273 }
c92ca04b
AR
3274
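/*
 * Busy-wait with mdelay() when called from interrupt context, where
 * sleeping is not allowed; otherwise sleep with msleep().
 */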
3275 if(in_interrupt())
3276 mdelay(50);
3277 else
3278 msleep(50);
3279
1da177e4
LT
3280 if (cnt++ > 10)
3281 break;
3282 }
1da177e4
LT
3283 return ret;
3284}
3285
20346722
K
3286/**
3287 * s2io_reset - Resets the card.
1da177e4
LT
3288 * @sp : private member of the device structure.
3289 * Description: Function to Reset the card. This function then also
20346722 3290 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3291 * the card reset also resets the configuration space.
3292 * Return value:
3293 * void.
3294 */
3295
26df54bf 3296static void s2io_reset(nic_t * sp)
1da177e4
LT
3297{
3298 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3299 u64 val64;
5e25b9dd 3300 u16 subid, pci_cmd;
1da177e4 3301
0b1f7ebe 3302 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 3303 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 3304
1da177e4
LT
3305 val64 = SW_RESET_ALL;
3306 writeq(val64, &bar0->sw_reset);
3307
20346722
K
3308 /*
3309 * At this stage, if the PCI write is indeed completed, the
3310 * card is reset and so is the PCI Config space of the device.
3311 * So a read cannot be issued at this stage on any of the
1da177e4
LT
3312 * registers to ensure the write into "sw_reset" register
3313 * has gone through.
3314 * Question: Is there any system call that will explicitly force
3315 * all the write commands still pending on the bus to be pushed
3316 * through?
3317 * As of now I'm just giving a 250ms delay and hoping that the
3318 * PCI write to sw_reset register is done by this time.
3319 */
3320 msleep(250);
c92ca04b
AR
3321 if (strstr(sp->product_name, "CX4")) {
3322 msleep(750);
3323 }
1da177e4 3324
e960fc5c 3325 /* Restore the PCI state saved during initialization. */
3326 pci_restore_state(sp->pdev);
3327 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
0b1f7ebe 3328 pci_cmd);
1da177e4
LT
3329 s2io_init_pci(sp);
3330
3331 msleep(250);
3332
20346722
K
3333 /* Set swapper to enable I/O register access */
3334 s2io_set_swapper(sp);
3335
cc6e7c44
RA
3336 /* Restore the MSIX table entries from local variables */
3337 restore_xmsi_data(sp);
3338
5e25b9dd 3339 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b
K
3340 if (sp->device_type == XFRAME_II_DEVICE) {
3341 /* Clear parity err detect bit */
3342 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 3343
303bcb4b
K
3344 /* Clearing PCIX Ecc status register */
3345 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 3346
303bcb4b
K
3347 /* Clearing PCI_STATUS error reflected here */
3348 writeq(BIT(62), &bar0->txpic_int_reg);
3349 }
5e25b9dd 3350
20346722
K
3351 /* Reset device statistics maintained by OS */
3352 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3353
1da177e4
LT
3354 /* SXE-002: Configure link and activity LED to turn it off */
3355 subid = sp->pdev->subsystem_device;
541ae68f
K
3356 if (((subid & 0xFF) >= 0x07) &&
3357 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
3358 val64 = readq(&bar0->gpio_control);
3359 val64 |= 0x0000800000000000ULL;
3360 writeq(val64, &bar0->gpio_control);
3361 val64 = 0x0411040400000000ULL;
509a2671 3362 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
3363 }
3364
541ae68f
K
3365 /*
3366 * Clear spurious ECC interrupts that would have occured on
3367 * XFRAME II cards after reset.
3368 */
3369 if (sp->device_type == XFRAME_II_DEVICE) {
3370 val64 = readq(&bar0->pcc_err_reg);
3371 writeq(val64, &bar0->pcc_err_reg);
3372 }
3373
1da177e4
LT
3374 sp->device_enabled_once = FALSE;
3375}
3376
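/*
 * Aside (a hedged sketch, not part of the driver): the usual way to
 * force a posted MMIO write out to the device is to read any register
 * back from the same device, since PCI ordering flushes pending writes
 * before the read completes. That idiom cannot be used in s2io_reset()
 * because the sw_reset write destroys the very register space we would
 * read, which is why a fixed delay is used instead. The helper name
 * below is hypothetical.
 */
static inline void example_flush_posted_write(void __iomem *reg, u64 val)
{
	writeq(val, reg);
	(void) readq(reg);	/* read-back flushes the posted write */
}
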
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(nic_t * sp)
{
	struct net_device *dev = sp->dev;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device is by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, take another look. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}

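/*
 * A minimal sketch of the probe pattern used above (hypothetical
 * helper, not part of this driver): walk a table of candidate swapper
 * settings, apply each one, and stop at the first whose feedback read
 * matches the expected signature.
 */
static int example_probe_settings(void __iomem *ctrl, void __iomem *fb,
				  const u64 *candidates, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		writeq(candidates[i], ctrl);
		if (readq(fb) == 0x0123456789ABCDEFULL)
			return i;	/* index of the working setting */
	}
	return -1;			/* none of the candidates worked */
}
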
static int wait_for_msix_trans(nic_t *nic, int i)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int ret = 0, cnt = 0;

	do {
		val64 = readq(&bar0->xmsi_access);
		if (!(val64 & BIT(15)))
			break;
		mdelay(1);
		cnt++;
	} while (cnt < 5);
	if (cnt == 5) {
		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
		ret = 1;
	}

	return ret;
}

static void restore_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	for (i = 0; i < nic->avail_msix_vectors; i++) {
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}

static void store_xmsi_data(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i;

	/* Store and display */
	for (i = 0; i < nic->avail_msix_vectors; i++) {
		val64 = (BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}

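/*
 * A hedged usage sketch (hypothetical helper, not part of the driver):
 * the MSI-X address/data pairs live in on-card registers and are wiped
 * by sw_reset, so the driver snapshots them into nic->msix_info[] once
 * the vectors are set up (store_xmsi_data() in s2io_open()) and
 * s2io_reset() replays them afterwards via restore_xmsi_data().
 */
static void example_msix_safe_reset(nic_t *nic)
{
	store_xmsi_data(nic);	/* snapshot the on-card table */
	s2io_reset(nic);	/* reset; s2io_reset() replays the snapshot */
}
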
int s2io_enable_msi(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 instead of the standard MSI-0
	 * for interrupt handling.
	 */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	dev->irq = nic->pdev->irq;
	return 0;
}

static int s2io_enable_msi_x(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (nic->entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
	    kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
		    GFP_KERNEL);
	if (nic->s2io_entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
		kfree(nic->entries);
		return -ENOMEM;
	}
	memset(nic->s2io_entries, 0,
	       MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	if (!nic->config.bimodal) {
		rx_mat = readq(&bar0->rx_mat);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
			rx_mat |= RX_MAT_SET(j, msix_indx);
			nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(rx_mat, &bar0->rx_mat);
	} else {
		tx_mat = readq(&bar0->tx_mat0_n[7]);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
			tx_mat |= TX_MAT_SET(j, msix_indx);
			nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
			nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
			nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
		}
		writeq(tx_mat, &bar0->tx_mat0_n[7]);
	}

	nic->avail_msix_vectors = 0;
	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init on error or if we get fewer vectors than the minimum required */
	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
		nic->avail_msix_vectors = ret;
		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
	}
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		kfree(nic->s2io_entries);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		nic->avail_msix_vectors = 0;
		return -ENOMEM;
	}
	if (!nic->avail_msix_vectors)
		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}

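/*
 * A hedged sketch of the allocation idiom above (hypothetical helper):
 * with this pci_enable_msix() API a positive return value means "only
 * this many vectors are available", so the caller may retry with the
 * smaller count as long as it still meets its minimum.
 */
static int example_alloc_msix(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      int wanted, int minimum)
{
	int ret = pci_enable_msix(pdev, entries, wanted);

	if (ret > 0 && ret >= minimum)
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;	/* 0 on success, < 0 on error, > 0 if still short */
}
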
/* ********************************************************* *
 * Functions defined below concern the OS part of the driver *
 * ********************************************************* */

/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * the NIC is initialized.
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		if (err == -ENODEV)
			goto hw_init_failed;
		else
			goto hw_enable_failed;
	}

	/* Store the values of the MSIX table in the nic_t structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
				  IRQF_SHARED, sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: MSI registration \
failed\n", dev->name);
			goto isr_registration_failed;
		}
	}
	if (sp->intr_type == MSI_X) {
		int i;

		for (i = 1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc1, "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc1,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
				    (unsigned long long)sp->msix_info[i].addr);
			} else {
				sprintf(sp->desc2, "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc2,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
				    (unsigned long long)sp->msix_info[i].addr);
			}
			if (err) {
				DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				goto isr_registration_failed;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			goto isr_registration_failed;
		}
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		err = -ENODEV;
		goto setting_mac_address_failed;
	}

	netif_start_queue(dev);
	return 0;

setting_mac_address_failed:
	if (sp->intr_type != MSI_X)
		free_irq(sp->pdev->irq, dev);
isr_registration_failed:
	del_timer_sync(&sp->alarm_timer);
	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control; /* Temp variable */

		for (i = 1; (sp->s2io_entries[i].in_use ==
			     MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		pci_disable_msix(sp->pdev);

		/* Temp */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);
	}
	else if (sp->intr_type == MSI)
		pci_disable_msi(sp->pdev);
hw_enable_failed:
	s2io_reset(sp);
hw_init_failed:
	if (sp->intr_type == MSI_X) {
		if (sp->entries)
			kfree(sp->entries);
		if (sp->s2io_entries)
			kfree(sp->s2io_entries);
	}
	return err;
}

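/*
 * A minimal sketch of the INTA registration pattern used above
 * (hypothetical helper): legacy interrupt lines may be shared with
 * other devices, so the handler is registered with IRQF_SHARED (the
 * successor of the old SA_SHIRQ flag) and must tolerate being called
 * for interrupts raised by other devices on the same line.
 */
static int example_register_inta(struct net_device *dev, nic_t *sp)
{
	/* the matching teardown is free_irq(sp->pdev->irq, dev) */
	return request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
			   sp->name, dev);
}
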
/**
 * s2io_close - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
	nic_t *sp = dev->priv;

	flush_scheduled_work();
	netif_stop_queue(dev);
	/* Reset card, kill tasklet and free Tx and Rx buffers. */
	s2io_card_down(sp, 1);

	sp->device_close_flag = TRUE;	/* Device is shut down. */
	return 0;
}

/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. The S2IO NIC supports
 * certain protocol assist features on the Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device cannot queue the packet, the trans_start variable
 * will not be updated.
 * Return value:
 * 0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	nic_t *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	TxD_t *txdp;
	TxFIFO_element_t __iomem *tx_fifo;
	unsigned long flags;
#ifdef NETIF_F_TSO
	int mss;
#endif
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	spin_lock_irqsave(&sp->tx_lock, flags);
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get the FIFO number to transmit on, based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
	    list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	txdp->Control_1 = 0;
	txdp->Control_2 = 0;
#ifdef NETIF_F_TSO
	mss = skb_shinfo(skb)->gso_size;
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
	}
#endif
	if (skb->ip_summed == CHECKSUM_HW) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
		int ufo_size;

		ufo_size = skb_shinfo(skb)->gso_size;
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		txdp++;
		txdp->Control_1 = 0;
		txdp->Control_2 = 0;
	}

	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);

#ifdef NETIF_F_TSO
	if (mss)
		val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
		val64 |= TX_FIFO_SPECIAL_FUNC;
	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}

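/*
 * Standalone sketch (plain C, hypothetical values) of the "put"/"get"
 * ring arithmetic used in s2io_xmit(): the ring is declared full when
 * advancing the put pointer (with wrap-around) would land on the get
 * pointer, which deliberately sacrifices one slot to distinguish
 * "full" from "empty".
 *
 * #include <stdio.h>
 *
 * static int ring_full(unsigned put, unsigned get, unsigned len)
 * {
 *	return ((put + 1 == len) ? 0 : put + 1) == get;
 * }
 *
 * int main(void)
 * {
 *	// len = 8: put = 6, get = 7 -> full; put = 7, get = 0 -> full
 *	printf("%d %d\n", ring_full(6, 7, 8), ring_full(7, 0, 8));
 *	return 0;
 * }
 */
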
static void
s2io_alarm_handle(unsigned long data)
{
	nic_t *sp = (nic_t *)data;

	alarm_intr_handler(sp);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}

static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	int i;
	int ret;
	mac_info_t *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;
	DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);

	/* If Intr is because of Rx Traffic */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* If Intr is because of Tx Traffic */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (!sp->lro) {
			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
			int level = rx_buffer_level(sp, rxb_size, i);

			if ((level == PANIC) && (!TASKLET_IN_USE)) {
				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
					  dev->name);
				DBG_PRINT(INTR_DBG, "PANIC levels\n");
				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
					DBG_PRINT(ERR_DBG, "%s:Out of memory",
						  dev->name);
					DBG_PRINT(ERR_DBG, " in ISR!!\n");
					clear_bit(0, (&sp->tasklet_status));
					atomic_dec(&sp->isr_cnt);
					return IRQ_HANDLED;
				}
				clear_bit(0, (&sp->tasklet_status));
			} else if (level == LOW) {
				tasklet_schedule(&sp->task);
			}
		}
		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory",
				  dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
			break;
		}
	}

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}

static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	ring_info_t *ring = (ring_info_t *)dev_id;
	nic_t *sp = ring->nic;
	struct net_device *dev = sp->dev;
	int rxb_size, level, rng_n;

	atomic_inc(&sp->isr_cnt);
	rx_intr_handler(ring);

	rng_n = ring->ring_no;
	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			tasklet_schedule(&sp->task);
		}
	}
	else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
		DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
		DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
	}

	atomic_dec(&sp->isr_cnt);

	return IRQ_HANDLED;
}

static irqreturn_t
s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	fifo_info_t *fifo = (fifo_info_t *)dev_id;
	nic_t *sp = fifo->nic;

	atomic_inc(&sp->isr_cnt);
	tx_intr_handler(fifo);
	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
static void s2io_txpic_intr_handle(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is an unstable state, so clear both the
			 * up and down interrupts and let the adapter
			 * re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			if (verify_xena_quiescence(sp, val64,
						   sp->device_enabled_once)) {
				/* Enable Adapter */
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				val64 |= ADAPTER_LED_ON;
				writeq(val64, &bar0->adapter_control);
				if (!sp->device_enabled_once)
					sp->device_enabled_once = 1;

				s2io_link(sp, LINK_UP);
				/*
				 * Unmask the link-down interrupt and mask
				 * the link-up interrupt.
				 */
				val64 = readq(&bar0->gpio_int_mask);
				val64 &= ~GPIO_INT_MASK_LINK_DOWN;
				val64 |= GPIO_INT_MASK_LINK_UP;
				writeq(val64, &bar0->gpio_int_mask);

			}
		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			if (verify_xena_quiescence(sp, val64,
						   sp->device_enabled_once)) {
				s2io_link(sp, LINK_DOWN);
				/* Link is down, so unmask the link-up interrupt */
				val64 = readq(&bar0->gpio_int_mask);
				val64 &= ~GPIO_INT_MASK_LINK_UP;
				val64 |= GPIO_INT_MASK_LINK_DOWN;
				writeq(val64, &bar0->gpio_int_mask);
			}
		}
	}
	val64 = readq(&bar0->gpio_int_mask);
}

/**
 * s2io_isr - ISR handler of the device.
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * @pt_regs: pointer to the registers pushed on the stack.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
 * IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0, val64, org_mask;
	mac_info_t *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be:
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by Xena. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	val64 = 0xFFFFFFFFFFFFFFFFULL;
	/* Store current mask before masking all interrupts */
	org_mask = readq(&bar0->general_int_mask);
	writeq(val64, &bar0->general_int_mask);

#ifdef CONFIG_S2IO_NAPI
	if (reason & GEN_INTR_RXTRAFFIC) {
		if (netif_rx_schedule_prep(dev)) {
			writeq(val64, &bar0->rx_traffic_mask);
			__netif_rx_schedule(dev);
		}
	}
#else
	/*
	 * The Rx handler is called by default, without checking for the
	 * cause of the interrupt.
	 * rx_traffic_int reg is an R1 register, so writing all 1's
	 * will ensure that the actual interrupt-causing bit gets
	 * cleared and hence a read can be avoided.
	 */
	writeq(val64, &bar0->rx_traffic_int);
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
	}
#endif

	/*
	 * tx_traffic_int reg is an R1 register, so writing all 1's
	 * will ensure that the actual interrupt-causing bit gets
	 * cleared and hence a read can be avoided.
	 */
	writeq(val64, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
#ifndef CONFIG_S2IO_NAPI
	for (i = 0; i < config->rx_ring_num; i++) {
		if (!sp->lro) {
			int ret;
			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
			int level = rx_buffer_level(sp, rxb_size, i);

			if ((level == PANIC) && (!TASKLET_IN_USE)) {
				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
					  dev->name);
				DBG_PRINT(INTR_DBG, "PANIC levels\n");
				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
					DBG_PRINT(ERR_DBG, "%s:Out of memory",
						  dev->name);
					DBG_PRINT(ERR_DBG, " in ISR!!\n");
					clear_bit(0, (&sp->tasklet_status));
					atomic_dec(&sp->isr_cnt);
					writeq(org_mask, &bar0->general_int_mask);
					return IRQ_HANDLED;
				}
				clear_bit(0, (&sp->tasklet_status));
			} else if (level == LOW) {
				tasklet_schedule(&sp->task);
			}
		}
		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory",
				  dev->name);
			DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
			break;
		}
	}
#endif
	writeq(org_mask, &bar0->general_int_mask);
	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}

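/*
 * A hedged note on the return values above: on a shared line the
 * kernel calls every registered handler, so a handler must return
 * IRQ_NONE when its device shows no pending cause (letting the core
 * detect unhandled/screaming interrupts) and IRQ_HANDLED otherwise.
 * Sketch of the minimal skeleton (hypothetical helper name):
 */
static irqreturn_t example_shared_isr(int irq, void *dev_id,
				      struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	if (!readq(&bar0->general_int_status))
		return IRQ_NONE;	/* not ours; another sharer's IRQ */
	/* ... service and acknowledge the causes here ... */
	return IRQ_HANDLED;
}
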
/**
 * s2io_updt_stats - updates the hardware statistics block.
 * @sp : private member of the device structure.
 * Description: Triggers a one-shot hardware statistics update and
 * polls briefly for its completion.
 */
static void s2io_updt_stats(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	if (atomic_read(&sp->card_state) == CARD_UP) {
		/* Approx 30us on a 133 MHz bus */
		val64 = SET_UPDT_CLICKS(10) |
		    STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
		writeq(val64, &bar0->stat_cfg);
		do {
			udelay(100);
			val64 = readq(&bar0->stat_cfg);
			if (!(val64 & BIT(0)))
				break;
			cnt++;
			if (cnt == 5)
				break; /* Updt failed */
		} while (1);
	}
}

/**
 * s2io_get_stats - Updates the device statistics structure.
 * @dev : pointer to the device structure.
 * Description:
 * This function updates the device statistics structure in the s2io_nic
 * structure and returns a pointer to the same.
 * Return value:
 * pointer to the updated net_device_stats structure.
 */

static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	sp->stats.tx_packets =
		le32_to_cpu(mac_control->stats_info->tmac_frms);
	sp->stats.tx_errors =
		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
	sp->stats.rx_errors =
		le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
	sp->stats.multicast =
		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
	sp->stats.rx_length_errors =
		le32_to_cpu(mac_control->stats_info->rmac_long_frms);

	return (&sp->stats);
}

/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if the multicast addresses must be enabled, if promiscuous
 * mode is to be disabled, etc.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait till command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait till command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}

/**
 * s2io_set_mac_addr - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with the new Mac Address.
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
{
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	register u64 val64, mac_addr = 0;
	int i;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
	}

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
	       &bar0->rmac_addr_data0_mem);

	val64 =
	    RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
		DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
		return FAILURE;
	}

	return SUCCESS;
}

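/*
 * Standalone sketch (plain C) of the address packing used above: the
 * six MAC bytes are folded MSB-first into the low 48 bits of a 64-bit
 * value before being written to rmac_addr_data0_mem.
 *
 * #include <stdio.h>
 *
 * int main(void)
 * {
 *	unsigned char addr[6] = { 0x00, 0x0c, 0xfc, 0x00, 0x00, 0x01 };
 *	unsigned long long mac = 0;
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		mac = (mac << 8) | addr[i];
 *	printf("0x%012llx\n", mac);	// prints 0x000cfc000001
 *	return 0;
 * }
 */
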
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.
 * Return value:
 * 0 on success.
 */

static int s2io_ethtool_sset(struct net_device *dev,
			     struct ethtool_cmd *info)
{
	nic_t *sp = dev->priv;
	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
		return -EINVAL;
	else {
		s2io_close(sp->dev);
		s2io_open(sp->dev);
	}

	return 0;
}

/**
 * s2io_ethtool_gset - Return link specific information.
 * @sp : private member of the device structure, pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc. to ethtool.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
	nic_t *sp = dev->priv;
	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->port = PORT_FIBRE;
	/* info->transceiver?? TODO */

	if (netif_carrier_ok(sp->dev)) {
		info->speed = 10000;
		info->duplex = DUPLEX_FULL;
	} else {
		info->speed = -1;
		info->duplex = -1;
	}

	info->autoneg = AUTONEG_DISABLE;
	return 0;
}

/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc. to ethtool.
 * Return value:
 * void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	nic_t *sp = dev->priv;

	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strncpy(info->version, s2io_driver_version, sizeof(info->version));
	strncpy(info->fw_version, "", sizeof(info->fw_version));
	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
	info->testinfo_len = S2IO_TEST_LEN;
	info->n_stats = S2IO_STAT_LEN;
}

/**
 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @regs : pointer to the structure with parameters given by ethtool for
 * dumping the registers.
 * @reg_space: The input argument into which all the registers are dumped.
 * Description:
 * Dumps the entire register space of the xFrame NIC into the user given
 * buffer area.
 * Return value :
 * void .
 */

static void s2io_ethtool_gregs(struct net_device *dev,
			       struct ethtool_regs *regs, void *space)
{
	int i;
	u64 reg;
	u8 *reg_space = (u8 *) space;
	nic_t *sp = dev->priv;

	regs->len = XENA_REG_SPACE;
	regs->version = sp->pdev->subsystem_device;

	for (i = 0; i < regs->len; i += 8) {
		reg = readq(sp->bar0 + i);
		memcpy((reg_space + i), &reg, 8);
	}
}

/**
 * s2io_phy_id - timer function that alternates the adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as a u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control register, setting/resetting it
 * on every invocation. The timer is set for 1/2 a second, hence the NIC
 * blinks once every second.
 */
static void s2io_phy_id(unsigned long data)
{
	nic_t *sp = (nic_t *) data;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
	}

	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}

/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @id : pointer to the structure with identification parameters given by
 * ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	if ((sp->device_type == XFRAME_I_DEVICE) &&
	    ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}

/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC.
 * Return value:
 * void
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
				       struct ethtool_pauseparam *ep)
{
	u64 val64;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 & RMAC_PAUSE_GEN_ENABLE)
		ep->tx_pause = TRUE;
	if (val64 & RMAC_PAUSE_RX_ENABLE)
		ep->rx_pause = TRUE;
	ep->autoneg = FALSE;
}

/**
 * s2io_ethtool_setpause_data - set/reset pause frame generation.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 * Return value:
 * int, returns 0 on Success
 */

static int s2io_ethtool_setpause_data(struct net_device *dev,
				      struct ethtool_pauseparam *ep)
{
	u64 val64;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	if (ep->tx_pause)
		val64 |= RMAC_PAUSE_GEN_ENABLE;
	else
		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
	if (ep->rx_pause)
		val64 |= RMAC_PAUSE_RX_ENABLE;
	else
		val64 &= ~RMAC_PAUSE_RX_ENABLE;
	writeq(val64, &bar0->rmac_pause_cfg);
	return 0;
}

/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID 5
static int read_eeprom(nic_t * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}

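/*
 * A minimal sketch of the bounded-polling idiom that read_eeprom()
 * and write_eeprom() both use (hypothetical helper, not part of the
 * driver): re-read a status register a fixed number of times with a
 * sleep in between, and give up with an error rather than spinning
 * forever on broken hardware.
 */
static int example_poll_done(XENA_dev_config_t __iomem *bar0)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		if (I2C_CONTROL_CNTL_END(readq(&bar0->i2c_control)))
			return 0;	/* transaction finished */
		msleep(50);		/* may sleep: not for IRQ context */
	}
	return -ETIMEDOUT;
}
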
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */

static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
9dc737a7
AR
5036static void s2io_vpd_read(nic_t *nic)
5037{
5038 u8 vpd_data[256],data;
5039 int i=0, cnt, fail = 0;
5040 int vpd_addr = 0x80;
5041
5042 if (nic->device_type == XFRAME_II_DEVICE) {
5043 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5044 vpd_addr = 0x80;
5045 }
5046 else {
5047 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5048 vpd_addr = 0x50;
5049 }
5050
5051 for (i = 0; i < 256; i +=4 ) {
5052 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5053 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5054 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5055 for (cnt = 0; cnt <5; cnt++) {
5056 msleep(2);
5057 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5058 if (data == 0x80)
5059 break;
5060 }
5061 if (cnt >= 5) {
5062 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5063 fail = 1;
5064 break;
5065 }
5066 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5067 (u32 *)&vpd_data[i]);
5068 }
5069 if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
5070 memset(nic->product_name, 0, vpd_data[1]);
5071 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5072 }
5073}
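/*
 * A sketch of the VPD access protocol used by s2io_vpd_read() above:
 * write the word offset to the VPD address register (capability base + 2),
 * clear the flag byte (base + 3), poll until the hardware sets it to 0x80,
 * then read 4 bytes from the VPD data register (base + 4). The helper name
 * is illustrative, and vpd_cap is assumed to be the VPD capability offset
 * already located by the caller.
 */
static int vpd_read_dword(struct pci_dev *pdev, int vpd_cap, u8 off, u32 *val)
{
	u8 flag = 0;
	int cnt;

	pci_write_config_byte(pdev, vpd_cap + 2, off);
	pci_write_config_byte(pdev, vpd_cap + 3, 0);
	for (cnt = 0; cnt < 5; cnt++) {
		msleep(2);
		pci_read_config_byte(pdev, vpd_cap + 3, &flag);
		if (flag == 0x80)
			return pci_read_config_dword(pdev, vpd_cap + 4, val);
	}
	return -EBUSY;	/* flag never set; VPD read timed out */
}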
5074
5075/**
5076 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5077 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
20346722 5078 * @eeprom : pointer to the user level structure provided by ethtool,
5079 * containing all relevant information.
5080 * @data_buf : user defined value to be written into Eeprom.
5081 * Description: Reads the values stored in the Eeprom at given offset
5082 * for a given length. Stores these values in the input argument data
5083 * buffer 'data_buf' and returns these to the caller (ethtool).
5084 * Return value:
5085 * int 0 on success
5086 */
5087
5088static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5089 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5090{
ad4ebed0 5091 u32 i, valid;
5092 u64 data;
5093 nic_t *sp = dev->priv;
5094
5095 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5096
5097 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5098 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5099
5100 for (i = 0; i < eeprom->len; i += 4) {
5101 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5102 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5103 return -EFAULT;
5104 }
5105 valid = INV(data);
5106 memcpy((data_buf + i), &valid, 4);
5107 }
5108 return 0;
5109}
5110
5111/**
5112 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5113 * @sp : private member of the device structure, which is a pointer to the
5114 * s2io_nic structure.
20346722 5115 * @eeprom : pointer to the user level structure provided by ethtool,
5116 * containing all relevant information.
5117 * @data_buf : user defined value to be written into Eeprom.
5118 * Description:
5119 * Tries to write the user provided value in the Eeprom, at the offset
5120 * given by the user.
5121 * Return value:
5122 * 0 on success, -EFAULT on failure.
5123 */
5124
5125static int s2io_ethtool_seeprom(struct net_device *dev,
5126 struct ethtool_eeprom *eeprom,
5127 u8 * data_buf)
5128{
5129 int len = eeprom->len, cnt = 0;
ad4ebed0 5130 u64 valid = 0, data;
5131 nic_t *sp = dev->priv;
5132
5133 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5134 DBG_PRINT(ERR_DBG,
5135 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5136 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5137 eeprom->magic);
5138 return -EFAULT;
5139 }
5140
5141 while (len) {
5142 data = (u32) data_buf[cnt] & 0x000000FF;
5143 if (data) {
5144 valid = (u32) (data << 24);
5145 } else
5146 valid = data;
5147
5148 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5149 DBG_PRINT(ERR_DBG,
5150 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5151 DBG_PRINT(ERR_DBG,
5152 "write into the specified offset\n");
5153 return -EFAULT;
5154 }
5155 cnt++;
5156 len--;
5157 }
5158
5159 return 0;
5160}
5161
5162/**
5163 * s2io_register_test - reads and writes into all clock domains.
5164 * @sp : private member of the device structure, which is a pointer to the
5165 * s2io_nic structure.
5166 * @data : variable that returns the result of each of the tests conducted
5167 * by the driver.
5168 * Description:
5169 * Read and write into all clock domains. The NIC has 3 clock domains;
5170 * the test verifies that registers in all three regions are accessible.
5171 * Return value:
5172 * 0 on success.
5173 */
5174
5175static int s2io_register_test(nic_t * sp, uint64_t * data)
5176{
5177 XENA_dev_config_t __iomem *bar0 = sp->bar0;
ad4ebed0 5178 u64 val64 = 0, exp_val;
5179 int fail = 0;
5180
5181 val64 = readq(&bar0->pif_rd_swapper_fb);
5182 if (val64 != 0x123456789abcdefULL) {
5183 fail = 1;
5184 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5185 }
5186
5187 val64 = readq(&bar0->rmac_pause_cfg);
5188 if (val64 != 0xc000ffff00000000ULL) {
5189 fail = 1;
5190 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5191 }
5192
5193 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5194 if (sp->device_type == XFRAME_II_DEVICE)
5195 exp_val = 0x0404040404040404ULL;
5196 else
5197 exp_val = 0x0808080808080808ULL;
5198 if (val64 != exp_val) {
5199 fail = 1;
5200 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5201 }
5202
5203 val64 = readq(&bar0->xgxs_efifo_cfg);
5204 if (val64 != 0x000000001923141EULL) {
5205 fail = 1;
5206 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5207 }
5208
5209 val64 = 0x5A5A5A5A5A5A5A5AULL;
5210 writeq(val64, &bar0->xmsi_data);
5211 val64 = readq(&bar0->xmsi_data);
5212 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5213 fail = 1;
5214 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5215 }
5216
5217 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5218 writeq(val64, &bar0->xmsi_data);
5219 val64 = readq(&bar0->xmsi_data);
5220 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5221 fail = 1;
5222 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5223 }
5224
5225 *data = fail;
ad4ebed0 5226 return fail;
5227}
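/*
 * The pattern behind the write tests above, as a standalone sketch: write
 * a value to a scratch register, read it back through the same path, and
 * flag a mismatch. xmsi_data is used here only because the driver already
 * treats it as a safe read/write scratch area during offline tests; the
 * helper name is illustrative.
 */
static int reg_readback_ok(XENA_dev_config_t __iomem *bar0, u64 pattern)
{
	u64 val64;

	writeq(pattern, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	return val64 == pattern;	/* 1 if the write/read path passed */
}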
5228
5229/**
20346722 5230 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5231 * @sp : private member of the device structure, which is a pointer to the
5232 * s2io_nic structure.
5233 * @data : variable that returns the result of each of the tests conducted by
5234 * the driver.
5235 * Description:
20346722 5236 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5237 * register.
5238 * Return value:
5239 * 0 on success.
5240 */
5241
5242static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5243{
5244 int fail = 0;
ad4ebed0 5245 u64 ret_data, org_4F0, org_7F0;
5246 u8 saved_4F0 = 0, saved_7F0 = 0;
5247 struct net_device *dev = sp->dev;
5248
5249 /* Test Write Error at offset 0 */
ad4ebed0 5250 /* Note that SPI interface allows write access to all areas
5251 * of EEPROM. Hence doing all negative testing only for Xframe I.
5252 */
5253 if (sp->device_type == XFRAME_I_DEVICE)
5254 if (!write_eeprom(sp, 0, 0, 3))
5255 fail = 1;
5256
5257 /* Save current values at offsets 0x4F0 and 0x7F0 */
5258 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5259 saved_4F0 = 1;
5260 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5261 saved_7F0 = 1;
5262
5263 /* Test Write at offset 4f0 */
ad4ebed0 5264 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5265 fail = 1;
5266 if (read_eeprom(sp, 0x4F0, &ret_data))
5267 fail = 1;
5268
ad4ebed0 5269 if (ret_data != 0x012345) {
5270 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5271 "Data written %llx Data read %llx\n",
5272 dev->name, (unsigned long long)0x12345,
5273 (unsigned long long)ret_data);
1da177e4 5274 fail = 1;
ad4ebed0 5275 }
5276
5277 /* Reset the EEPROM data to FFFF */
ad4ebed0 5278 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5279
5280 /* Test Write Request Error at offset 0x7c */
ad4ebed0 5281 if (sp->device_type == XFRAME_I_DEVICE)
5282 if (!write_eeprom(sp, 0x07C, 0, 3))
5283 fail = 1;
1da177e4 5284
ad4ebed0 5285 /* Test Write Request at offset 0x7f0 */
5286 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 5287 fail = 1;
ad4ebed0 5288 if (read_eeprom(sp, 0x7F0, &ret_data))
5289 fail = 1;
5290
ad4ebed0 5291 if (ret_data != 0x012345) {
5292 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5293 "Data written %llx Data read %llx\n",
5294 dev->name, (unsigned long long)0x12345,
5295 (unsigned long long)ret_data);
1da177e4 5296 fail = 1;
ad4ebed0 5297 }
5298
5299 /* Reset the EEPROM data to FFFF */
ad4ebed0 5300 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 5301
ad4ebed0 5302 if (sp->device_type == XFRAME_I_DEVICE) {
5303 /* Test Write Error at offset 0x80 */
5304 if (!write_eeprom(sp, 0x080, 0, 3))
5305 fail = 1;
1da177e4 5306
ad4ebed0 5307 /* Test Write Error at offset 0xfc */
5308 if (!write_eeprom(sp, 0x0FC, 0, 3))
5309 fail = 1;
1da177e4 5310
ad4ebed0 5311 /* Test Write Error at offset 0x100 */
5312 if (!write_eeprom(sp, 0x100, 0, 3))
5313 fail = 1;
1da177e4 5314
ad4ebed0 5315 /* Test Write Error at offset 4ec */
5316 if (!write_eeprom(sp, 0x4EC, 0, 3))
5317 fail = 1;
5318 }
5319
5320 /* Restore values at offsets 0x4F0 and 0x7F0 */
5321 if (saved_4F0)
5322 write_eeprom(sp, 0x4F0, org_4F0, 3);
5323 if (saved_7F0)
5324 write_eeprom(sp, 0x7F0, org_7F0, 3);
5325
5326 *data = fail;
ad4ebed0 5327 return fail;
5328}
5329
5330/**
5331 * s2io_bist_test - invokes the MemBist test of the card .
20346722 5332 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5333 * s2io_nic structure.
20346722 5334 * @data : variable that returns the result of each of the tests conducted by
5335 * the driver.
5336 * Description:
5337 * This invokes the MemBist test of the card. We give around
5338 * 2 secs time for the test to complete. If it's still not complete
20346722 5339 * within this period, we consider that the test failed.
5340 * Return value:
5341 * 0 on success and -1 on failure.
5342 */
5343
5344static int s2io_bist_test(nic_t * sp, uint64_t * data)
5345{
5346 u8 bist = 0;
5347 int cnt = 0, ret = -1;
5348
5349 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5350 bist |= PCI_BIST_START;
5351 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5352
5353 while (cnt < 20) {
5354 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5355 if (!(bist & PCI_BIST_START)) {
5356 *data = (bist & PCI_BIST_CODE_MASK);
5357 ret = 0;
5358 break;
5359 }
5360 msleep(100);
5361 cnt++;
5362 }
5363
5364 return ret;
5365}
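/*
 * A hedged variant of the BIST kick-off above: per the PCI spec a device
 * advertises BIST support via the PCI_BIST_CAPABLE bit, so a caller could
 * check that bit before setting PCI_BIST_START rather than starting
 * blindly. The helper name is illustrative.
 */
static int bist_supported(struct pci_dev *pdev)
{
	u8 bist = 0;

	pci_read_config_byte(pdev, PCI_BIST, &bist);
	return (bist & PCI_BIST_CAPABLE) != 0;
}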
5366
5367/**
5368 * s2io_link_test - verifies the link state of the nic
5369 * @sp : private member of the device structure, which is a pointer to the
5370 * s2io_nic structure.
5371 * @data: variable that returns the result of each of the tests conducted by
5372 * the driver.
5373 * Description:
20346722 5374 * The function verifies the link state of the NIC and updates the input
5375 * argument 'data' appropriately.
5376 * Return value:
5377 * 0 on success.
5378 */
5379
5380static int s2io_link_test(nic_t * sp, uint64_t * data)
5381{
5382 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5383 u64 val64;
5384
5385 val64 = readq(&bar0->adapter_status);
c92ca04b 5386 if (!LINK_IS_UP(val64))
1da177e4 5387 *data = 1;
5388 else
5389 *data = 0;
5390
5391 return 0;
5392}
5393
5394/**
5395 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5396 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5397 * s2io_nic structure.
20346722 5398 * @data : variable that returns the result of each of the tests
5399 * conducted by the driver.
5400 * Description:
20346722 5401 * This is one of the offline tests that checks the read and write
5402 * access to the RldRam chip on the NIC.
5403 * Return value:
5404 * 0 on success.
5405 */
5406
5407static int s2io_rldram_test(nic_t * sp, uint64_t * data)
5408{
5409 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5410 u64 val64;
ad4ebed0 5411 int cnt, iteration = 0, test_fail = 0;
5412
5413 val64 = readq(&bar0->adapter_control);
5414 val64 &= ~ADAPTER_ECC_EN;
5415 writeq(val64, &bar0->adapter_control);
5416
5417 val64 = readq(&bar0->mc_rldram_test_ctrl);
5418 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 5419 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5420
5421 val64 = readq(&bar0->mc_rldram_mrs);
5422 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5423 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5424
5425 val64 |= MC_RLDRAM_MRS_ENABLE;
5426 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5427
5428 while (iteration < 2) {
5429 val64 = 0x55555555aaaa0000ULL;
5430 if (iteration == 1) {
5431 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5432 }
5433 writeq(val64, &bar0->mc_rldram_test_d0);
5434
5435 val64 = 0xaaaa5a5555550000ULL;
5436 if (iteration == 1) {
5437 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5438 }
5439 writeq(val64, &bar0->mc_rldram_test_d1);
5440
5441 val64 = 0x55aaaaaaaa5a0000ULL;
5442 if (iteration == 1) {
5443 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5444 }
5445 writeq(val64, &bar0->mc_rldram_test_d2);
5446
ad4ebed0 5447 val64 = (u64) (0x0000003ffffe0100ULL);
5448 writeq(val64, &bar0->mc_rldram_test_add);
5449
ad4ebed0 5450 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5451 MC_RLDRAM_TEST_GO;
5452 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5453
5454 for (cnt = 0; cnt < 5; cnt++) {
5455 val64 = readq(&bar0->mc_rldram_test_ctrl);
5456 if (val64 & MC_RLDRAM_TEST_DONE)
5457 break;
5458 msleep(200);
5459 }
5460
5461 if (cnt == 5)
5462 break;
5463
ad4ebed0 5464 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5465 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5466
5467 for (cnt = 0; cnt < 5; cnt++) {
5468 val64 = readq(&bar0->mc_rldram_test_ctrl);
5469 if (val64 & MC_RLDRAM_TEST_DONE)
5470 break;
5471 msleep(500);
5472 }
5473
5474 if (cnt == 5)
5475 break;
5476
5477 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 5478 if (!(val64 & MC_RLDRAM_TEST_PASS))
5479 test_fail = 1;
1da177e4
LT
5480
5481 iteration++;
5482 }
5483
ad4ebed0 5484 *data = test_fail;
1da177e4 5485
ad4ebed0 5486 /* Bring the adapter out of test mode */
5487 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5488
5489 return test_fail;
5490}
5491
5492/**
5493 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5494 * @sp : private member of the device structure, which is a pointer to the
5495 * s2io_nic structure.
5496 * @ethtest : pointer to an ethtool command specific structure that will be
5497 * returned to the user.
20346722 5498 * @data : variable that returns the result of each of the test
5499 * conducted by the driver.
5500 * Description:
5501 * This function conducts 6 tests (4 offline and 2 online) to determine
5502 * the health of the card.
5503 * Return value:
5504 * void
5505 */
5506
5507static void s2io_ethtool_test(struct net_device *dev,
5508 struct ethtool_test *ethtest,
5509 uint64_t * data)
5510{
5511 nic_t *sp = dev->priv;
5512 int orig_state = netif_running(sp->dev);
5513
5514 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5515 /* Offline Tests. */
20346722 5516 if (orig_state)
1da177e4 5517 s2io_close(sp->dev);
5518
5519 if (s2io_register_test(sp, &data[0]))
5520 ethtest->flags |= ETH_TEST_FL_FAILED;
5521
5522 s2io_reset(sp);
5523
5524 if (s2io_rldram_test(sp, &data[3]))
5525 ethtest->flags |= ETH_TEST_FL_FAILED;
5526
5527 s2io_reset(sp);
5528
5529 if (s2io_eeprom_test(sp, &data[1]))
5530 ethtest->flags |= ETH_TEST_FL_FAILED;
5531
5532 if (s2io_bist_test(sp, &data[4]))
5533 ethtest->flags |= ETH_TEST_FL_FAILED;
5534
5535 if (orig_state)
5536 s2io_open(sp->dev);
5537
5538 data[2] = 0;
5539 } else {
5540 /* Online Tests. */
5541 if (!orig_state) {
5542 DBG_PRINT(ERR_DBG,
5543 "%s: is not up, cannot run test\n",
5544 dev->name);
5545 data[0] = -1;
5546 data[1] = -1;
5547 data[2] = -1;
5548 data[3] = -1;
5549 data[4] = -1;
5550 }
5551
5552 if (s2io_link_test(sp, &data[2]))
5553 ethtest->flags |= ETH_TEST_FL_FAILED;
5554
5555 data[0] = 0;
5556 data[1] = 0;
5557 data[3] = 0;
5558 data[4] = 0;
5559 }
5560}
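/*
 * For reference, the layout of the 'data' array filled in above; it must
 * match the order of the strings in s2io_gstrings. The enum names below
 * are illustrative only (the driver itself uses bare indices):
 */
enum s2io_self_test_results {
	S2IO_TEST_REGISTER = 0,	/* data[0]: register read/write test */
	S2IO_TEST_EEPROM   = 1,	/* data[1]: EEPROM programming test  */
	S2IO_TEST_LINK     = 2,	/* data[2]: online link-state test   */
	S2IO_TEST_RLDRAM   = 3,	/* data[3]: RLDRAM access test       */
	S2IO_TEST_BIST     = 4,	/* data[4]: PCI built-in self test   */
};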
5561
5562static void s2io_get_ethtool_stats(struct net_device *dev,
5563 struct ethtool_stats *estats,
5564 u64 * tmp_stats)
5565{
5566 int i = 0;
5567 nic_t *sp = dev->priv;
5568 StatInfo_t *stat_info = sp->mac_control.stats_info;
5569
7ba013ac 5570 s2io_updt_stats(sp);
5571 tmp_stats[i++] =
5572 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5573 le32_to_cpu(stat_info->tmac_frms);
5574 tmp_stats[i++] =
5575 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5576 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 5577 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5578 tmp_stats[i++] =
5579 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5580 le32_to_cpu(stat_info->tmac_mcst_frms);
5581 tmp_stats[i++] =
5582 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5583 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 5584 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5585 tmp_stats[i++] =
5586 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5587 le32_to_cpu(stat_info->tmac_ttl_octets);
5588 tmp_stats[i++] =
5589 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5590 le32_to_cpu(stat_info->tmac_ucst_frms);
5591 tmp_stats[i++] =
5592 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5593 le32_to_cpu(stat_info->tmac_nucst_frms);
5594 tmp_stats[i++] =
5595 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5596 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 5597 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 5598 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5599 tmp_stats[i++] =
5600 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5601 le32_to_cpu(stat_info->tmac_vld_ip);
5602 tmp_stats[i++] =
5603 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5604 le32_to_cpu(stat_info->tmac_drop_ip);
5605 tmp_stats[i++] =
5606 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5607 le32_to_cpu(stat_info->tmac_icmp);
5608 tmp_stats[i++] =
5609 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5610 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 5611 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5612 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5613 le32_to_cpu(stat_info->tmac_udp);
5614 tmp_stats[i++] =
5615 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5616 le32_to_cpu(stat_info->rmac_vld_frms);
5617 tmp_stats[i++] =
5618 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5619 le32_to_cpu(stat_info->rmac_data_octets);
5620 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5621 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5622 tmp_stats[i++] =
5623 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5624 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5625 tmp_stats[i++] =
5626 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5627 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 5628 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 5629 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5630 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5631 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5632 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5633 tmp_stats[i++] =
5634 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5635 le32_to_cpu(stat_info->rmac_ttl_octets);
5636 tmp_stats[i++] =
5637 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5638 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5639 tmp_stats[i++] =
5640 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5641 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5642 tmp_stats[i++] =
5643 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5644 le32_to_cpu(stat_info->rmac_discarded_frms);
5645 tmp_stats[i++] =
5646 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5647 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5648 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5649 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5650 tmp_stats[i++] =
5651 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5652 le32_to_cpu(stat_info->rmac_usized_frms);
5653 tmp_stats[i++] =
5654 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5655 le32_to_cpu(stat_info->rmac_osized_frms);
5656 tmp_stats[i++] =
5657 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5658 le32_to_cpu(stat_info->rmac_frag_frms);
5659 tmp_stats[i++] =
5660 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5661 le32_to_cpu(stat_info->rmac_jabber_frms);
5662 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5663 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5664 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5665 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5666 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5667 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5668 tmp_stats[i++] =
5669 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 5670 le32_to_cpu(stat_info->rmac_ip);
5671 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5672 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5673 tmp_stats[i++] =
5674 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 5675 le32_to_cpu(stat_info->rmac_drop_ip);
5676 tmp_stats[i++] =
5677 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 5678 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 5679 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5680 tmp_stats[i++] =
5681 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5682 le32_to_cpu(stat_info->rmac_udp);
5683 tmp_stats[i++] =
5684 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5685 le32_to_cpu(stat_info->rmac_err_drp_udp);
5686 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5687 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5688 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5689 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5690 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5691 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5692 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5693 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5694 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5695 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5696 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5697 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5698 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5699 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5700 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5701 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5702 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5703 tmp_stats[i++] =
5704 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5705 le32_to_cpu(stat_info->rmac_pause_cnt);
5706 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5707 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5708 tmp_stats[i++] =
5709 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5710 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 5711 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5712 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5713 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5714 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5715 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5716 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5717 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5718 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5719 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5720 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5721 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5722 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5723 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5724 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5725 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5726 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5727 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5728 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5729 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5730 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5731 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5732 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5733 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5734 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5735 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5736 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5737 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5738 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5739 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5740 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5741 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5742 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5743 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5744 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5745 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5746 tmp_stats[i++] = 0;
5747 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5748 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5749 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5750 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5751 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5752 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5753 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5754 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5755 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5756 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5757 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5758 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5759 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5760 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5761 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5762 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5763 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5764 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5765 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5766 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5767 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5768 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5769 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 5770 if (stat_info->sw_stat.num_aggregations) {
5771 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5772 int count = 0;
5773 /*
5774 * Since 64-bit divide does not work on all platforms,
5775 * do repeated subtraction.
5776 */
5777 while (tmp >= stat_info->sw_stat.num_aggregations) {
5778 tmp -= stat_info->sw_stat.num_aggregations;
5779 count++;
5780 }
5781 tmp_stats[i++] = count;
fe931395 5782 }
5783 else
5784 tmp_stats[i++] = 0;
5785}
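/*
 * The repeated-subtraction loop above computes sum / count without a
 * native 64-bit divide. The kernel also provides do_div() (declared in
 * asm/div64.h) for exactly this case; a sketch of the equivalent
 * computation, with an illustrative helper name:
 */
static u64 avg_pkts_aggregated(u64 sum, u32 count)
{
	if (!count)
		return 0;
	do_div(sum, count);	/* do_div() divides 'sum' in place */
	return sum;
}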
5786
ac1f60db 5787static int s2io_ethtool_get_regs_len(struct net_device *dev)
5788{
5789 return (XENA_REG_SPACE);
5790}
5791
5792
ac1f60db 5793static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5794{
5795 nic_t *sp = dev->priv;
5796
5797 return (sp->rx_csum);
5798}
5799
5800static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5801{
5802 nic_t *sp = dev->priv;
5803
5804 if (data)
5805 sp->rx_csum = 1;
5806 else
5807 sp->rx_csum = 0;
5808
5809 return 0;
5810}
5811
5812static int s2io_get_eeprom_len(struct net_device *dev)
5813{
5814 return (XENA_EEPROM_SPACE);
5815}
5816
ac1f60db 5817static int s2io_ethtool_self_test_count(struct net_device *dev)
5818{
5819 return (S2IO_TEST_LEN);
5820}
5821
5822static void s2io_ethtool_get_strings(struct net_device *dev,
5823 u32 stringset, u8 * data)
5824{
5825 switch (stringset) {
5826 case ETH_SS_TEST:
5827 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5828 break;
5829 case ETH_SS_STATS:
5830 memcpy(data, &ethtool_stats_keys,
5831 sizeof(ethtool_stats_keys));
5832 }
5833}
5834static int s2io_ethtool_get_stats_count(struct net_device *dev)
5835{
5836 return (S2IO_STAT_LEN);
5837}
5838
ac1f60db 5839static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5840{
5841 if (data)
5842 dev->features |= NETIF_F_IP_CSUM;
5843 else
5844 dev->features &= ~NETIF_F_IP_CSUM;
5845
5846 return 0;
5847}
5848
5849
5850static struct ethtool_ops netdev_ethtool_ops = {
5851 .get_settings = s2io_ethtool_gset,
5852 .set_settings = s2io_ethtool_sset,
5853 .get_drvinfo = s2io_ethtool_gdrvinfo,
5854 .get_regs_len = s2io_ethtool_get_regs_len,
5855 .get_regs = s2io_ethtool_gregs,
5856 .get_link = ethtool_op_get_link,
5857 .get_eeprom_len = s2io_get_eeprom_len,
5858 .get_eeprom = s2io_ethtool_geeprom,
5859 .set_eeprom = s2io_ethtool_seeprom,
5860 .get_pauseparam = s2io_ethtool_getpause_data,
5861 .set_pauseparam = s2io_ethtool_setpause_data,
5862 .get_rx_csum = s2io_ethtool_get_rx_csum,
5863 .set_rx_csum = s2io_ethtool_set_rx_csum,
5864 .get_tx_csum = ethtool_op_get_tx_csum,
5865 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5866 .get_sg = ethtool_op_get_sg,
5867 .set_sg = ethtool_op_set_sg,
5868#ifdef NETIF_F_TSO
5869 .get_tso = ethtool_op_get_tso,
5870 .set_tso = ethtool_op_set_tso,
5871#endif
5872 .get_ufo = ethtool_op_get_ufo,
5873 .set_ufo = ethtool_op_set_ufo,
5874 .self_test_count = s2io_ethtool_self_test_count,
5875 .self_test = s2io_ethtool_test,
5876 .get_strings = s2io_ethtool_get_strings,
5877 .phys_id = s2io_ethtool_idnic,
5878 .get_stats_count = s2io_ethtool_get_stats_count,
5879 .get_ethtool_stats = s2io_get_ethtool_stats
5880};
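/*
 * For context: this ops table only takes effect once it is attached to
 * the net_device during probe; the 2.6 idiom for that is the
 * SET_ETHTOOL_OPS() macro from netdevice.h. A sketch of the hookup
 * (helper name is illustrative):
 */
static void s2io_attach_ethtool(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
}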
5881
5882/**
20346722 5883 * s2io_ioctl - Entry point for the Ioctl
5884 * @dev : Device pointer.
5885 * @ifr : An IOCTL specific structure, that can contain a pointer to
5886 * a proprietary structure used to pass information to the driver.
5887 * @cmd : This is used to distinguish between the different commands that
5888 * can be passed to the IOCTL functions.
5889 * Description:
5890 * Currently there is no special functionality supported in IOCTL, hence
5891 * the function always returns -EOPNOTSUPP.
5892 */
5893
ac1f60db 5894static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5895{
5896 return -EOPNOTSUPP;
5897}
5898
5899/**
5900 * s2io_change_mtu - entry point to change MTU size for the device.
5901 * @dev : device pointer.
5902 * @new_mtu : the new MTU size for the device.
5903 * Description: A driver entry point to change MTU size for the device.
5904 * Before changing the MTU the device must be stopped.
5905 * Return value:
5906 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5907 * file on failure.
5908 */
5909
ac1f60db 5910static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5911{
5912 nic_t *sp = dev->priv;
5913
5914 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5915 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5916 dev->name);
5917 return -EPERM;
5918 }
5919
1da177e4 5920 dev->mtu = new_mtu;
d8892c6e 5921 if (netif_running(dev)) {
c92ca04b 5922 s2io_card_down(sp, 0);
5923 netif_stop_queue(dev);
5924 if (s2io_card_up(sp)) {
5925 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5926 __FUNCTION__);
5927 }
5928 if (netif_queue_stopped(dev))
5929 netif_wake_queue(dev);
5930 } else { /* Device is down */
5931 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5932 u64 val64 = new_mtu;
5933
5934 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5935 }
5936
5937 return 0;
5938}
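/*
 * About the vBIT() write above: Xframe registers are specified MSB-first,
 * and s2io.h defines vBIT(val, loc, sz) to place 'val' in a 'sz'-bit field
 * starting at big-endian bit 'loc'. Assuming that definition, the
 * rmac_max_pyld_len write reduces to the shift below (helper name is
 * illustrative):
 */
static u64 max_pyld_len_field(u64 new_mtu)
{
	/* vBIT(val, 2, 14) == val << (64 - 2 - 14) */
	return new_mtu << 48;
}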
5939
5940/**
5941 * s2io_tasklet - Bottom half of the ISR.
5942 * @dev_adr : address of the device structure in dma_addr_t format.
5943 * Description:
5944 * This is the tasklet or the bottom half of the ISR. This is
20346722 5945 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 5946 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 5947 * be pushed into the tasklet. For now the tasklet is used only to
5948 * replenish the Rx buffers in the Rx buffer descriptors.
5949 * Return value:
5950 * void.
5951 */
5952
5953static void s2io_tasklet(unsigned long dev_addr)
5954{
5955 struct net_device *dev = (struct net_device *) dev_addr;
5956 nic_t *sp = dev->priv;
5957 int i, ret;
5958 mac_info_t *mac_control;
5959 struct config_param *config;
5960
5961 mac_control = &sp->mac_control;
5962 config = &sp->config;
5963
5964 if (!TASKLET_IN_USE) {
5965 for (i = 0; i < config->rx_ring_num; i++) {
5966 ret = fill_rx_buffers(sp, i);
5967 if (ret == -ENOMEM) {
5968 DBG_PRINT(ERR_DBG, "%s: Out of ",
5969 dev->name);
5970 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5971 break;
5972 } else if (ret == -EFILL) {
5973 DBG_PRINT(ERR_DBG,
5974 "%s: Rx Ring %d is full\n",
5975 dev->name, i);
5976 break;
5977 }
5978 }
5979 clear_bit(0, (&sp->tasklet_status));
5980 }
5981}
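/*
 * Lifecycle of the tasklet above, for reference: it is bound to the device
 * in s2io_card_up() via tasklet_init(), scheduled from interrupt context
 * with tasklet_schedule(), and torn down in s2io_card_down() with
 * tasklet_kill(). A sketch of the scheduling side (helper name is
 * illustrative):
 */
static void s2io_kick_tasklet(nic_t *sp)
{
	/* defer Rx buffer replenishment to softirq context */
	tasklet_schedule(&sp->task);
}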
5982
5983/**
5984 * s2io_set_link - Set the Link status
5985 * @data: long pointer to device private structure
5986 * Description: Sets the link status for the adapter
5987 */
5988
5989static void s2io_set_link(unsigned long data)
5990{
5991 nic_t *nic = (nic_t *) data;
5992 struct net_device *dev = nic->dev;
5993 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5994 register u64 val64;
5995 u16 subid;
5996
5997 if (test_and_set_bit(0, &(nic->link_state))) {
5998 /* The card is being reset, no point doing anything */
5999 return;
6000 }
6001
6002 subid = nic->pdev->subsystem_device;
6003 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6004 /*
6005 * Allow a small delay for the NICs self initiated
6006 * cleanup to complete.
6007 */
6008 msleep(100);
6009 }
6010
6011 val64 = readq(&bar0->adapter_status);
20346722 6012 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
6013 if (LINK_IS_UP(val64)) {
6014 val64 = readq(&bar0->adapter_control);
6015 val64 |= ADAPTER_CNTL_EN;
6016 writeq(val64, &bar0->adapter_control);
6017 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6018 subid)) {
6019 val64 = readq(&bar0->gpio_control);
6020 val64 |= GPIO_CTRL_GPIO_0;
6021 writeq(val64, &bar0->gpio_control);
6022 val64 = readq(&bar0->gpio_control);
6023 } else {
6024 val64 |= ADAPTER_LED_ON;
6025 writeq(val64, &bar0->adapter_control);
6026 }
6027 if (s2io_link_fault_indication(nic) ==
6028 MAC_RMAC_ERR_TIMER) {
6029 val64 = readq(&bar0->adapter_status);
6030 if (!LINK_IS_UP(val64)) {
6031 DBG_PRINT(ERR_DBG, "%s: Link down after enabling device\n",
6032 dev->name);
6036 }
6037 }
6038 if (nic->device_enabled_once == FALSE) {
6039 nic->device_enabled_once = TRUE;
6040 }
6041 s2io_link(nic, LINK_UP);
6042 } else {
6043 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6044 subid)) {
6045 val64 = readq(&bar0->gpio_control);
6046 val64 &= ~GPIO_CTRL_GPIO_0;
6047 writeq(val64, &bar0->gpio_control);
6048 val64 = readq(&bar0->gpio_control);
6049 }
6050 s2io_link(nic, LINK_DOWN);
6051 }
6052 } else { /* NIC is not Quiescent. */
6053 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6054 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6055 netif_stop_queue(dev);
6056 }
6057 clear_bit(0, &(nic->link_state));
6058}
6059
6060static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6061 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6062 u64 *temp2, int size)
6063{
6064 struct net_device *dev = sp->dev;
6065 struct sk_buff *frag_list;
6066
6067 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6068 /* allocate skb */
6069 if (*skb) {
6070 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6071 /*
6072 * As Rx frames are not going to be processed,
6073 * use the same mapped address for the Rxd
6074 * buffer pointer.
6075 */
6076 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
6077 } else {
6078 *skb = dev_alloc_skb(size);
6079 if (!(*skb)) {
6080 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6081 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6082 return -ENOMEM ;
6083 }
6084 /* store the mapped addr in a temp variable
6085 * so that it can be used for the next rxd whose
6086 * Host Control is NULL
6087 */
6088 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
6089 pci_map_single( sp->pdev, (*skb)->data,
6090 size - NET_IP_ALIGN,
6091 PCI_DMA_FROMDEVICE);
6092 rxdp->Host_Control = (unsigned long) (*skb);
6093 }
6094 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6095 /* Two buffer Mode */
6096 if (*skb) {
6097 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6098 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6099 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6100 } else {
6101 *skb = dev_alloc_skb(size);
6102 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6103 pci_map_single(sp->pdev, (*skb)->data,
6104 dev->mtu + 4,
6105 PCI_DMA_FROMDEVICE);
6106 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6107 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6108 PCI_DMA_FROMDEVICE);
6109 rxdp->Host_Control = (unsigned long) (*skb);
6110
6111 /* Buffer-1 will be dummy buffer not used */
6112 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6113 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6114 PCI_DMA_FROMDEVICE);
6115 }
6116 } else if ((rxdp->Host_Control == 0)) {
6117 /* Three buffer mode */
6118 if (*skb) {
6119 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6120 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6121 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6122 } else {
6123 *skb = dev_alloc_skb(size);
6124
6125 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6126 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6127 PCI_DMA_FROMDEVICE);
6128 /* Buffer-1 receives L3/L4 headers */
6129 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6130 pci_map_single( sp->pdev, (*skb)->data,
6131 l3l4hdr_size + 4,
6132 PCI_DMA_FROMDEVICE);
6133 /*
6134 * skb_shinfo(skb)->frag_list will have L4
6135 * data payload
6136 */
6137 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6138 ALIGN_SIZE);
6139 if (skb_shinfo(*skb)->frag_list == NULL) {
6140 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6141 dev->name);
6142 return -ENOMEM ;
6143 }
6144 frag_list = skb_shinfo(*skb)->frag_list;
6145 frag_list->next = NULL;
6146 /*
6147 * Buffer-2 receives L4 data payload
6148 */
6149 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6150 pci_map_single( sp->pdev, frag_list->data,
6151 dev->mtu, PCI_DMA_FROMDEVICE);
6152 }
6153 }
6154 return 0;
6155}
6156static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6157{
6158 struct net_device *dev = sp->dev;
6159 if (sp->rxd_mode == RXD_MODE_1) {
6160 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6161 } else if (sp->rxd_mode == RXD_MODE_3B) {
6162 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6163 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6164 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6165 } else {
6166 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6167 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6168 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6169 }
6170}
6171
6172static int rxd_owner_bit_reset(nic_t *sp)
6173{
6174 int i, j, k, blk_cnt = 0, size;
6175 mac_info_t * mac_control = &sp->mac_control;
6176 struct config_param *config = &sp->config;
6177 struct net_device *dev = sp->dev;
6178 RxD_t *rxdp = NULL;
6179 struct sk_buff *skb = NULL;
6180 buffAdd_t *ba = NULL;
6181 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6182
6183 /* Calculate the size based on ring mode */
6184 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6185 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6186 if (sp->rxd_mode == RXD_MODE_1)
6187 size += NET_IP_ALIGN;
6188 else if (sp->rxd_mode == RXD_MODE_3B)
6189 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6190 else
6191 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6192
6193 for (i = 0; i < config->rx_ring_num; i++) {
6194 blk_cnt = config->rx_cfg[i].num_rxd /
6195 (rxd_count[sp->rxd_mode] +1);
6196
6197 for (j = 0; j < blk_cnt; j++) {
6198 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6199 rxdp = mac_control->rings[i].
6200 rx_blocks[j].rxds[k].virt_addr;
6201 if(sp->rxd_mode >= RXD_MODE_3A)
6202 ba = &mac_control->rings[i].ba[j][k];
6203 set_rxd_buffer_pointer(sp, rxdp, ba,
6204 &skb,(u64 *)&temp0_64,
6205 (u64 *)&temp1_64,
6206 (u64 *)&temp2_64, size);
6207
6208 set_rxd_buffer_size(sp, rxdp, size);
6209 wmb();
6210 /* flip the Ownership bit to Hardware */
6211 rxdp->Control_1 |= RXD_OWN_XENA;
6212 }
6213 }
6214 }
6215 return 0;
6216
6217}
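/*
 * The ordering rule used above, in isolation: every descriptor field must
 * be visible to the device before the ownership bit flips to hardware,
 * hence the wmb() before setting RXD_OWN_XENA. A minimal sketch (helper
 * name is illustrative):
 */
static void give_rxd_to_hw(RxD_t *rxdp)
{
	/* buffer pointers and sizes must already be filled in */
	wmb();			/* order prior descriptor writes */
	rxdp->Control_1 |= RXD_OWN_XENA;
}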
6218
c92ca04b 6219static void s2io_card_down(nic_t * sp, int flag)
6220{
6221 int cnt = 0;
6222 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6223 unsigned long flags;
6224 register u64 val64 = 0;
c92ca04b 6225 struct net_device *dev = sp->dev;
1da177e4 6226
25fff88e 6227 del_timer_sync(&sp->alarm_timer);
1da177e4 6228 /* If s2io_set_link task is executing, wait till it completes. */
20346722 6229 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 6230 msleep(50);
20346722 6231 }
6232 atomic_set(&sp->card_state, CARD_DOWN);
6233
6234 /* disable Tx and Rx traffic on the NIC */
6235 stop_nic(sp);
6236 if (flag) {
6237 if (sp->intr_type == MSI_X) {
6238 int i;
6239 u16 msi_control;
6240
6241 for (i=1; (sp->s2io_entries[i].in_use ==
6242 MSIX_REGISTERED_SUCCESS); i++) {
6243 int vector = sp->entries[i].vector;
6244 void *arg = sp->s2io_entries[i].arg;
6245
6246 free_irq(vector, arg);
6247 }
6248 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6249 msi_control &= 0xFFFE; /* Disable MSI */
6250 pci_write_config_word(sp->pdev, 0x42, msi_control);
6251 pci_disable_msix(sp->pdev);
6252 } else {
6253 free_irq(sp->pdev->irq, dev);
6254 if (sp->intr_type == MSI)
6255 pci_disable_msi(sp->pdev);
6256 }
6257 }
6258 /* Waiting till all Interrupt handlers are complete */
6259 cnt = 0;
6260 do {
6261 msleep(10);
6262 if (!atomic_read(&sp->isr_cnt))
6263 break;
6264 cnt++;
6265 } while(cnt < 5);
6266
6267 /* Kill tasklet. */
6268 tasklet_kill(&sp->task);
6269
6270 /* Check if the device is Quiescent and then Reset the NIC */
6271 do {
6272 /* As per the HW requirement we need to replenish the
6273 * receive buffers to avoid a ring bump. Since there is
6274 * no intention of processing the Rx frames at this point, we
6275 * just set the ownership bit of the rxds in each Rx
6276 * ring to HW and set the appropriate buffer size
6277 * based on the ring mode.
6278 */
6279 rxd_owner_bit_reset(sp);
6280
1da177e4 6281 val64 = readq(&bar0->adapter_status);
20346722 6282 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
6283 break;
6284 }
6285
6286 msleep(50);
6287 cnt++;
6288 if (cnt == 10) {
6289 DBG_PRINT(ERR_DBG,
6290 "s2io_close: Device not Quiescent ");
6291 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
6292 (unsigned long long) val64);
6293 break;
6294 }
6295 } while (1);
6296 s2io_reset(sp);
6297
6298 spin_lock_irqsave(&sp->tx_lock, flags);
6299 /* Free all Tx buffers */
1da177e4 6300 free_tx_buffers(sp);
6301 spin_unlock_irqrestore(&sp->tx_lock, flags);
6302
6303 /* Free all Rx buffers */
6304 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 6305 free_rx_buffers(sp);
7ba013ac 6306 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 6307
6308 clear_bit(0, &(sp->link_state));
6309}
6310
6311static int s2io_card_up(nic_t * sp)
6312{
cc6e7c44 6313 int i, ret = 0;
6314 mac_info_t *mac_control;
6315 struct config_param *config;
6316 struct net_device *dev = (struct net_device *) sp->dev;
6317
6318 /* Initialize the H/W I/O registers */
6319 if (init_nic(sp) != 0) {
6320 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6321 dev->name);
6322 return -ENODEV;
6323 }
6324
6325 if (sp->intr_type == MSI)
6326 ret = s2io_enable_msi(sp);
6327 else if (sp->intr_type == MSI_X)
6328 ret = s2io_enable_msi_x(sp);
6329 if (ret) {
6330 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6331 sp->intr_type = INTA;
6332 }
6333
6334 /*
6335 * Initializing the Rx buffers. For now we are considering only 1
6336 * Rx ring and initializing buffers into 30 Rx blocks
6337 */
6338 mac_control = &sp->mac_control;
6339 config = &sp->config;
6340
6341 for (i = 0; i < config->rx_ring_num; i++) {
6342 if ((ret = fill_rx_buffers(sp, i))) {
6343 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6344 dev->name);
6345 s2io_reset(sp);
6346 free_rx_buffers(sp);
6347 return -ENOMEM;
6348 }
6349 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6350 atomic_read(&sp->rx_bufs_left[i]));
6351 }
6352
6353 /* Setting its receive mode */
6354 s2io_set_multicast(dev);
6355
6356 if (sp->lro) {
6357 /* Initialize max aggregatable pkts based on MTU */
6358 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6359 /* Check if we can use (if specified) the user provided value */
6360 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6361 sp->lro_max_aggr_per_sess = lro_max_pkts;
6362 }
6363
6364 /* Enable tasklet for the device */
6365 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6366
6367 /* Enable Rx Traffic and interrupts on the NIC */
6368 if (start_nic(sp)) {
6369 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6370 tasklet_kill(&sp->task);
6371 s2io_reset(sp);
6372 free_irq(dev->irq, dev);
6373 free_rx_buffers(sp);
6374 return -ENODEV;
6375 }
6376
6377 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6378
6379 atomic_set(&sp->card_state, CARD_UP);
6380 return 0;
6381}
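/*
 * Worked example of the LRO sizing above: the aggregated length must fit
 * in 16 bits, so at most (1 << 16) - 1 = 65535 bytes per session. With a
 * 1500-byte MTU that is 65535 / 1500 = 43 frames, further capped by the
 * lro_max_pkts module parameter. A sketch (helper name is illustrative):
 */
static int lro_aggr_limit(int mtu, int max_pkts)
{
	int aggr = ((1 << 16) - 1) / mtu;	/* 43 for mtu == 1500 */

	return aggr < max_pkts ? aggr : max_pkts;
}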
6382
20346722 6383/**
6384 * s2io_restart_nic - Resets the NIC.
6385 * @data : long pointer to the device private structure
6386 * Description:
6387 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 6388 * function after 0.5 secs to reset the NIC. The idea is to reduce
6389 * the run time of the watch dog routine which is run holding a
6390 * spin lock.
6391 */
6392
6393static void s2io_restart_nic(unsigned long data)
6394{
6395 struct net_device *dev = (struct net_device *) data;
6396 nic_t *sp = dev->priv;
6397
c92ca04b 6398 s2io_card_down(sp, 0);
6399 if (s2io_card_up(sp)) {
6400 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6401 dev->name);
6402 }
6403 netif_wake_queue(dev);
6404 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6405 dev->name);
20346722 6406
6407}
6408
6409/**
6410 * s2io_tx_watchdog - Watchdog for transmit side.
6411 * @dev : Pointer to net device structure
6412 * Description:
6413 * This function is triggered if the Tx Queue is stopped
6414 * for a pre-defined amount of time when the Interface is still up.
6415 * If the Interface is jammed in such a situation, the hardware is
6416 * reset (by s2io_close) and restarted again (by s2io_open) to
6417 * overcome any problem that might have been caused in the hardware.
6418 * Return value:
6419 * void
6420 */
6421
6422static void s2io_tx_watchdog(struct net_device *dev)
6423{
6424 nic_t *sp = dev->priv;
6425
6426 if (netif_carrier_ok(dev)) {
6427 schedule_work(&sp->rst_timer_task);
bd1034f0 6428 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6429 }
6430}
6431
6432/**
6433 * rx_osm_handler - To perform some OS related operations on SKB.
6434 * @sp: private member of the device structure,pointer to s2io_nic structure.
6435 * @skb : the socket buffer pointer.
6436 * @len : length of the packet
6437 * @cksum : FCS checksum of the frame.
6438 * @ring_no : the ring from which this RxD was extracted.
20346722 6439 * Description:
6440 * This function is called by the Rx interrupt service routine to perform
6441 * some OS related operations on the SKB before passing it to the upper
6442 * layers. It mainly checks if the checksum is OK, if so adds it to the
6443 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6444 * to the upper layer. If the checksum is wrong, it increments the Rx
6445 * packet error count, frees the SKB and returns error.
6446 * Return value:
6447 * SUCCESS on success and -1 on failure.
6448 */
20346722 6449static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 6450{
20346722 6451 nic_t *sp = ring_data->nic;
1da177e4 6452 struct net_device *dev = (struct net_device *) sp->dev;
6453 struct sk_buff *skb = (struct sk_buff *)
6454 ((unsigned long) rxdp->Host_Control);
6455 int ring_no = ring_data->ring_no;
1da177e4 6456 u16 l3_csum, l4_csum;
863c11a9 6457 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7d3d0439 6458 lro_t *lro;
da6971d8 6459
20346722 6460 skb->dev = dev;
c92ca04b 6461
863c11a9 6462 if (err) {
6463 /* Check for parity error */
6464 if (err & 0x1) {
6465 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6466 }
6467
6468 /*
6469 * Drop the packet if bad transfer code. Exception being
6470 * 0x5, which could be due to unsupported IPv6 extension header.
6471 * In this case, we let stack handle the packet.
6472 * Note that in this case, since checksum will be incorrect,
6473 * stack will validate the same.
6474 */
6475 if (err && ((err >> 48) != 0x5)) {
6476 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6477 dev->name, err);
6478 sp->stats.rx_crc_errors++;
6479 dev_kfree_skb(skb);
6480 atomic_dec(&sp->rx_bufs_left[ring_no]);
6481 rxdp->Host_Control = 0;
6482 return 0;
6483 }
20346722 6484 }
1da177e4 6485
6486 /* Updating statistics */
6487 rxdp->Host_Control = 0;
6488 sp->rx_pkt_count++;
6489 sp->stats.rx_packets++;
6490 if (sp->rxd_mode == RXD_MODE_1) {
6491 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6492
6493 sp->stats.rx_bytes += len;
6494 skb_put(skb, len);
6495
6496 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6497 int get_block = ring_data->rx_curr_get_info.block_index;
6498 int get_off = ring_data->rx_curr_get_info.offset;
6499 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6500 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6501 unsigned char *buff = skb_push(skb, buf0_len);
6502
6503 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
6504 sp->stats.rx_bytes += buf0_len + buf2_len;
6505 memcpy(buff, ba->ba_0, buf0_len);
6506
6507 if (sp->rxd_mode == RXD_MODE_3A) {
6508 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6509
6510 skb_put(skb, buf1_len);
6511 skb->len += buf2_len;
6512 skb->data_len += buf2_len;
6513 skb->truesize += buf2_len;
6514 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6515 sp->stats.rx_bytes += buf1_len;
6516
6517 } else
6518 skb_put(skb, buf2_len);
6519 }
20346722 6520
6521 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6522 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6523 (sp->rx_csum)) {
6524 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6525 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6526 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 6527 /*
1da177e4
LT
6528 * NIC verifies if the Checksum of the received
6529 * frame is Ok or not and accordingly returns
6530 * a flag in the RxD.
6531 */
6532 skb->ip_summed = CHECKSUM_UNNECESSARY;
6533 if (sp->lro) {
6534 u32 tcp_len;
6535 u8 *tcp;
6536 int ret = 0;
6537
6538 ret = s2io_club_tcp_session(skb->data, &tcp,
6539 &tcp_len, &lro, rxdp, sp);
6540 switch (ret) {
6541 case 3: /* Begin anew */
6542 lro->parent = skb;
6543 goto aggregate;
6544 case 1: /* Aggregate */
6545 {
6546 lro_append_pkt(sp, lro,
6547 skb, tcp_len);
6548 goto aggregate;
6549 }
6550 case 4: /* Flush session */
6551 {
6552 lro_append_pkt(sp, lro,
6553 skb, tcp_len);
6554 queue_rx_frame(lro->parent);
6555 clear_lro_session(lro);
6556 sp->mac_control.stats_info->
6557 sw_stat.flush_max_pkts++;
6558 goto aggregate;
6559 }
6560 case 2: /* Flush both */
6561 lro->parent->data_len =
6562 lro->frags_len;
6563 sp->mac_control.stats_info->
6564 sw_stat.sending_both++;
6565 queue_rx_frame(lro->parent);
6566 clear_lro_session(lro);
6567 goto send_up;
6568 case 0: /* sessions exceeded */
6569 case -1: /* non-TCP or not
6570 * L2 aggregatable
6571 */
6572 case 5: /*
6573 * First pkt in session not
6574 * L3/L4 aggregatable
6575 */
6576 break;
6577 default:
6578 DBG_PRINT(ERR_DBG,
6579 "%s: Samadhana!!\n",
6580 __FUNCTION__);
6581 BUG();
6582 }
6583 }
1da177e4 6584 } else {
6585 /*
6586 * Packet with erroneous checksum, let the
6587 * upper layers deal with it.
6588 */
6589 skb->ip_summed = CHECKSUM_NONE;
6590 }
6591 } else {
6592 skb->ip_summed = CHECKSUM_NONE;
6593 }
6594
6595 if (!sp->lro) {
6596 skb->protocol = eth_type_trans(skb, dev);
1da177e4 6597#ifdef CONFIG_S2IO_NAPI
6598 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6599 /* Queueing the vlan frame to the upper layer */
6600 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6601 RXD_GET_VLAN_TAG(rxdp->Control_2));
6602 } else {
6603 netif_receive_skb(skb);
6604 }
1da177e4 6605#else
6606 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6607 /* Queueing the vlan frame to the upper layer */
6608 vlan_hwaccel_rx(skb, sp->vlgrp,
6609 RXD_GET_VLAN_TAG(rxdp->Control_2));
6610 } else {
6611 netif_rx(skb);
6612 }
1da177e4 6613#endif
6614 } else {
6615send_up:
6616 queue_rx_frame(skb);
6617 }
1da177e4 6618 dev->last_rx = jiffies;
7d3d0439 6619aggregate:
1da177e4 6620 atomic_dec(&sp->rx_bufs_left[ring_no]);
6621 return SUCCESS;
6622}
6623
6624/**
6625 * s2io_link - stops/starts the Tx queue.
6626 * @sp : private member of the device structure, which is a pointer to the
6627 * s2io_nic structure.
6628 * @link : indicates whether link is UP/DOWN.
6629 * Description:
6630 * This function stops/starts the Tx queue depending on whether the link
6631 * status of the NIC is down or up. This is called by the Alarm
6632 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
6633 * Return value:
6634 * void.
6635 */
6636
26df54bf 6637static void s2io_link(nic_t * sp, int link)
6638{
6639 struct net_device *dev = (struct net_device *) sp->dev;
6640
6641 if (link != sp->last_link_state) {
6642 if (link == LINK_DOWN) {
6643 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6644 netif_carrier_off(dev);
6645 } else {
6646 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6647 netif_carrier_on(dev);
6648 }
6649 }
6650 sp->last_link_state = link;
6651}
6652
6653/**
6654 * get_xena_rev_id - to identify revision ID of xena.
6655 * @pdev : PCI Dev structure
6656 * Description:
6657 * Function to identify the Revision ID of xena.
6658 * Return value:
6659 * returns the revision ID of the device.
6660 */
6661
26df54bf 6662static int get_xena_rev_id(struct pci_dev *pdev)
6663{
6664 u8 id = 0;
6665
6666 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
6667 return id;
6668}
6669
6670/**
6671 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6672 * @sp : private member of the device structure, which is a pointer to the
6673 * s2io_nic structure.
6674 * Description:
6675 * This function initializes a few of the PCI and PCI-X configuration registers
6676 * with recommended values.
6677 * Return value:
6678 * void
6679 */
6680
6681static void s2io_init_pci(nic_t * sp)
6682{
20346722 6683 u16 pci_cmd = 0, pcix_cmd = 0;
6684
6685 /* Enable Data Parity Error Recovery in PCI-X command register. */
6686 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6687 &(pcix_cmd));
1da177e4 6688 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6689 (pcix_cmd | 1));
1da177e4 6690 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6691 &(pcix_cmd));
1da177e4
LT
6692
6693 /* Set the PErr Response bit in PCI command register. */
6694 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6695 pci_write_config_word(sp->pdev, PCI_COMMAND,
6696 (pci_cmd | PCI_COMMAND_PARITY));
6697 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
6698}
6699
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param(rx_ring_mode, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
module_param(use_continuous_tx_intrs, int, 0);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
module_param(l3l4hdr_size, int, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
module_param(intr_type, int, 0);
module_param(lro, int, 0);
module_param(lro_max_pkts, int, 0);

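/*
 * Illustrative example (not part of the original source): the parameters
 * above are given at load time; e.g. requesting MSI-X interrupts with
 * LRO enabled might look like:
 *
 *	modprobe s2io intr_type=2 lro=1
 */
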
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
	if (tx_fifo_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
		tx_fifo_num = 8;
	}
	if (rx_ring_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
		rx_ring_num = 8;
	}
#ifdef CONFIG_S2IO_NAPI
	if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
			  "MSI/MSI-X is enabled. Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#endif
#ifndef CONFIG_PCI_MSI
	if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
			  "MSI/MSI-X. Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#else
	if (*dev_intr_type > MSI_X) {
		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#endif
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
	if (rx_ring_mode > 3) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
		rx_ring_mode = 3;
	}
	return SUCCESS;
}

/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	nic_t *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	XENA_dev_config_t __iomem *bar0 = NULL;
	u16 subid;
	mac_info_t *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;

	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if (dev_intr_type != MSI_X) {
		if (pci_request_regions(pdev, s2io_driver_name)) {
			DBG_PRINT(ERR_DBG, "Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
	} else {
		if (!(request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
		if (!(request_mem_region(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
			release_mem_region(pci_resource_start(pdev, 0),
					   pci_resource_len(pdev, 0));
			pci_disable_device(pdev);
			return -ENODEV;
		}
	}

	dev = alloc_etherdev(sizeof(nic_t));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(nic_t));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
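	/*
	 * Map the module parameter onto the hardware descriptor mode:
	 * 1 selects 1-buffer mode, 2 selects 2-buffer mode (RXD_MODE_3B)
	 * and 3 selects 3-buffer mode (RXD_MODE_3A), matching the mode
	 * names printed at registration time below.
	 */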
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;
	if (rx_ring_mode == 3)
		sp->rxd_mode = RXD_MODE_3A;

	sp->intr_type = dev_intr_type;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#if defined(CONFIG_S2IO_NAPI)
	dev->poll = s2io_poll;
	dev->weight = 32;
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef NETIF_F_TSO
	dev->features |= NETIF_F_TSO;
#endif
#ifdef NETIF_F_TSO6
	dev->features |= NETIF_F_TSO6;
#endif
	if (sp->device_type & XFRAME_II_DEVICE) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task,
		  (void (*)(void *)) s2io_restart_nic, dev);
	INIT_WORK(&sp->set_link_task,
		  (void (*)(void *)) s2io_set_link, sp);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot it's placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

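	/*
	 * Unpack the 48-bit MAC address from the 64-bit register value;
	 * within each 32-bit half the bytes are stored in reverse order.
	 */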
	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
#endif
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "%s: Neterion %s", dev->name, sp->product_name);
	DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
		  get_xena_rev_id(sp->pdev),
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
		  "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
		  sp->def_mac_addr[0].mac_addr[0],
		  sp->def_mac_addr[0].mac_addr[1],
		  sp->def_mac_addr[0].mac_addr[2],
		  sp->def_mac_addr[0].mac_addr[3],
		  sp->def_mac_addr[0].mac_addr[4],
		  sp->def_mac_addr[0].mac_addr[5]);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch (sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3A:
		DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}
#ifdef CONFIG_S2IO_NAPI
	DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
#endif
	switch (sp->intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);

	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/* Initialize bimodal Interrupts */
	sp->config.bimodal = bimodal;
	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
		sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
			  dev->name);
	}

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	if (dev_intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a hot-plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	nic_t *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_disable_device(pdev);
	if (sp->intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);

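/**
 * check_L2_lro_capable - checks if a frame can be considered for LRO.
 * @buffer: start of the received frame.
 * @ip: set to the frame's IP header on success.
 * @tcp: set to the frame's TCP header on success.
 * @rxdp: receive descriptor of the frame.
 * Description: Only TCP frames carried in a plain DIX Ethernet header
 * (with or without a VLAN tag) qualify; LLC/SNAP encapsulations are
 * considered non-mergeable.
 * Return value: 0 if the frame is an LRO candidate, -1 otherwise.
 */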
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, RxD_t *rxdp)
{
	int ip_off;
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG, "%s: Non-TCP frames not supported for LRO\n",
			  __FUNCTION__);
		return -1;
	}

	/* TODO:
	 * By default the VLAN field in the MAC is stripped by the card, if this
	 * feature is turned off in rx_pa_cfg register, then the ip_off field
	 * has to be shifted by a further 2 bytes
	 */
	switch (l2_type) {
	case 0:	/* DIX type */
	case 4:	/* DIX type with VLAN */
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		break;
	/* LLC, SNAP etc are considered non-mergeable */
	default:
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}

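/**
 * check_for_socket_match - checks whether a packet belongs to an LRO session.
 * @lro: the LRO session being compared against.
 * @ip: IP header of the received packet.
 * @tcp: TCP header of the received packet.
 * Return value: 0 if the source/destination address and port pairs match
 * the session, -1 otherwise.
 */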
static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
				  struct tcphdr *tcp)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
	    (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
		return -1;
	return 0;
}

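/**
 * get_l4_pyld_length - returns the length of the TCP payload.
 * @ip: IP header of the packet.
 * @tcp: TCP header of the packet.
 * Description: The payload length is the IP total length minus the IP
 * and TCP header lengths (ihl and doff are counted in 32-bit words).
 */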
static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
	return (ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
}

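/**
 * initiate_new_session - starts tracking a new LRO session.
 * @lro: free LRO object to be initialized.
 * @l2h: start of the frame's L2 header.
 * @ip: IP header of the first packet.
 * @tcp: TCP header of the first packet.
 * @tcp_pyld_len: length of the first packet's TCP payload.
 * Description: Records the header pointers, the expected next sequence
 * number and the TCP timestamp values (when the option is present) so
 * that subsequent packets can be merged into this session.
 */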
static void initiate_new_session(lro_t *lro, u8 *l2h,
	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * Check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		u32 *ptr;
		ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}

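/**
 * update_L3L4_header - rewrites the headers of an aggregated packet.
 * @sp: private member of the device structure.
 * @lro: LRO session being flushed.
 * Description: Updates the IP total length and checksum, the TCP ack and
 * window fields (and tsecr when timestamps are in use) of the session's
 * first packet so that it describes the whole aggregated frame, and
 * updates the aggregation statistics.
 */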
static void update_L3L4_header(nic_t *sp, lro_t *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	u16 nchk;
	StatInfo_t *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		u32 *ptr = (u32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}

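/**
 * aggregate_new_rx - merges a packet into an existing LRO session.
 * @lro: session the packet belongs to.
 * @ip: IP header of the new packet.
 * @tcp: TCP header of the new packet.
 * @l4_pyld: length of the new packet's TCP payload.
 * Description: Extends the session's running length and expected next
 * sequence number, and caches the latest ack, window and timestamp
 * values from this packet.
 */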
static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
	struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window (from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = *(ptr + 1);
		lro->cur_tsecr = *(ptr + 2);
	}
}

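/**
 * verify_l3_l4_lro_capable - checks if a packet can be aggregated.
 * @l_lro: existing session the packet would join, or NULL for a new one.
 * @ip: IP header of the packet.
 * @tcp: TCP header of the packet.
 * @tcp_pyld_len: length of the packet's TCP payload.
 * Description: Rejects pure acks, packets with IP options, packets with
 * any TCP flag other than ack set, and packets with TCP options other
 * than a single well-formed timestamp. Timestamps must also increase
 * monotonically within a session.
 * Return value: 0 if the packet is aggregatable, -1 otherwise.
 */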
static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5)	/* IP has options */
		return -1;

	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}

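/**
 * s2io_club_tcp_session - decides the LRO fate of a received TCP packet.
 * @buffer: start of the received frame.
 * @tcp: set to the packet's TCP header.
 * @tcp_len: set to the packet's TCP payload length.
 * @lro: set to the session the packet maps to, if any.
 * @rxdp: receive descriptor of the frame.
 * @sp: private member of the device structure.
 * Return value:
 * -1 or 5 if the packet cannot be aggregated, 0 if all sessions are in
 * use, 1 if the packet was aggregated, 2 if the session must be flushed
 * (out-of-order or unmergeable packet), 3 if a new session was begun,
 * and 4 if the session hit the per-session aggregation limit and was
 * flushed.
 */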
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
		      RxD_t *rxdp, nic_t *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		lro_t *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				    sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, *tcp_len))
				ret = 1;	/* Aggregate */
			else
				ret = 2;	/* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			lro_t *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3;	/* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) {	/* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4;	/* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
			  __FUNCTION__);
		break;
	}

	return ret;
}

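/**
 * clear_lro_session - frees an LRO session for reuse.
 * @lro: session to be cleared.
 * Description: Zeroes the whole object, which also resets its in_use flag.
 */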
static void clear_lro_session(lro_t *lro)
{
	static u16 lro_struct_size = sizeof(lro_t);

	memset(lro, 0, lro_struct_size);
}

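/**
 * queue_rx_frame - hands a received frame to the network stack.
 * @skb: socket buffer to be passed up.
 * Description: Sets the protocol field and queues the frame via
 * netif_receive_skb() (NAPI) or netif_rx() (non-NAPI).
 */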
static void queue_rx_frame(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
	netif_receive_skb(skb);
#else
	netif_rx(skb);
#endif
}

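/**
 * lro_append_pkt - appends a packet's payload to an aggregated frame.
 * @sp: private member of the device structure.
 * @lro: session the packet belongs to.
 * @skb: socket buffer carrying the new payload.
 * @tcp_len: length of the payload to append.
 * Description: Trims the new skb down to its TCP payload and links it
 * onto the parent skb's frag_list, updating the parent's length fields
 * and the clubbed-frames statistic.
 */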
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
			   u32 tcp_len)
{
	struct sk_buff *tmp, *first = lro->parent;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if ((tmp = skb_shinfo(first)->frag_list)) {
		while (tmp->next)
			tmp = tmp->next;
		tmp->next = skb;
	} else
		skb_shinfo(first)->frag_list = skb;
	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
}