net: fix section mismatches
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code parts that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
3a3d5756
SH
51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
53 ************************************************************************/
54
6cef2b8e
JP
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
40239396 66#include <linux/mdio.h>
1da177e4
LT
67#include <linux/skbuff.h>
68#include <linux/init.h>
69#include <linux/delay.h>
70#include <linux/stddef.h>
71#include <linux/ioctl.h>
72#include <linux/timex.h>
1da177e4 73#include <linux/ethtool.h>
1da177e4 74#include <linux/workqueue.h>
be3a6b02 75#include <linux/if_vlan.h>
7d3d0439
RA
76#include <linux/ip.h>
77#include <linux/tcp.h>
d44570e4
JP
78#include <linux/uaccess.h>
79#include <linux/io.h>
5a0e3ad6 80#include <linux/slab.h>
7d3d0439 81#include <net/tcp.h>
1da177e4 82
1da177e4 83#include <asm/system.h>
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
11410b62 91#define DRV_VERSION "2.0.26.28"
6c1792f4 92
1da177e4 93/* S2io Driver name & version. */
c0dbf37e
JM
94static const char s2io_driver_name[] = "Neterion";
95static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 96
c0dbf37e
JM
97static const int rxd_size[2] = {32, 48};
98static const int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd
K
106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
d44570e4
JP
115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
119
120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
d44570e4 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
92b84437
SS
124{
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126}
127
1da177e4 128/* Ethtool related variables and Macros. */
6fce365d 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135};
136
6fce365d 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
1da177e4 147 {"tmac_any_err_frms"},
bd1034f0 148 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
bd1034f0 163 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
1da177e4 170 {"rmac_discarded_frms"},
bd1034f0
AR
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
1da177e4
LT
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
bd1034f0
AR
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
bd1034f0
AR
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
1da177e4 209 {"rmac_pause_cnt"},
bd1034f0
AR
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
bd1034f0
AR
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
fa1f0cb3
SS
231 {"rxf_wr_cnt"}
232};
233
6fce365d 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
250 {"link_fault_cnt"}
251};
252
6fce365d 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
bd1034f0
AR
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
8116f3cf
SS
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
43b7c451
SH
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
1da177e4
LT
326};
327
4c3616cd
AMR
328#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 331
d44570e4
JP
332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 334
d44570e4
JP
335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 337
4c3616cd 338#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 339#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
d44570e4
JP
341#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
342 init_timer(&timer); \
343 timer.function = handle; \
344 timer.data = (unsigned long)arg; \
345 mod_timer(&timer, (jiffies + exp)) \
25fff88e 346
2fd37688
SS
347/* copy mac addr to def_mac_addr array */
348static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
349{
350 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356}
04025095 357
be3a6b02
K
358/* Add the vlan */
359static void s2io_vlan_rx_register(struct net_device *dev,
04025095 360 struct vlan_group *grp)
be3a6b02 361{
2fda096d 362 int i;
4cf1653a 363 struct s2io_nic *nic = netdev_priv(dev);
2fda096d 364 unsigned long flags[MAX_TX_FIFOS];
2fda096d 365 struct config_param *config = &nic->config;
ffb5df6c 366 struct mac_info *mac_control = &nic->mac_control;
2fda096d 367
13d866a9
JP
368 for (i = 0; i < config->tx_fifo_num; i++) {
369 struct fifo_info *fifo = &mac_control->fifos[i];
370
371 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
372 }
be3a6b02 373
be3a6b02 374 nic->vlgrp = grp;
13d866a9
JP
375
376 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
377 struct fifo_info *fifo = &mac_control->fifos[i];
378
379 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
380 }
be3a6b02
K
381}
382
cdb5bf02 383/* Unregister the vlan */
04025095 384static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
cdb5bf02
SH
385{
386 int i;
4cf1653a 387 struct s2io_nic *nic = netdev_priv(dev);
cdb5bf02 388 unsigned long flags[MAX_TX_FIFOS];
cdb5bf02 389 struct config_param *config = &nic->config;
ffb5df6c 390 struct mac_info *mac_control = &nic->mac_control;
cdb5bf02 391
13d866a9
JP
392 for (i = 0; i < config->tx_fifo_num; i++) {
393 struct fifo_info *fifo = &mac_control->fifos[i];
394
395 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
396 }
cdb5bf02
SH
397
398 if (nic->vlgrp)
399 vlan_group_set_device(nic->vlgrp, vid, NULL);
400
13d866a9
JP
401 for (i = config->tx_fifo_num - 1; i >= 0; i--) {
402 struct fifo_info *fifo = &mac_control->fifos[i];
403
404 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
405 }
cdb5bf02
SH
406}
407
20346722 408/*
1da177e4
LT
409 * Constants to be programmed into the Xena's registers, to configure
410 * the XAUI.
411 */
412
1da177e4 413#define END_SIGN 0x0
f71e1309 414static const u64 herc_act_dtx_cfg[] = {
541ae68f 415 /* Set address */
e960fc5c 416 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 417 /* Write data */
e960fc5c 418 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
419 /* Set address */
420 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
421 /* Write data */
422 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
423 /* Set address */
e960fc5c 424 0x801205150D440000ULL, 0x801205150D4400E0ULL,
425 /* Write data */
426 0x801205150D440004ULL, 0x801205150D4400E4ULL,
427 /* Set address */
541ae68f
K
428 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
429 /* Write data */
430 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
431 /* Done */
432 END_SIGN
433};
434
f71e1309 435static const u64 xena_dtx_cfg[] = {
c92ca04b 436 /* Set address */
1da177e4 437 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
438 /* Write data */
439 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
440 /* Set address */
441 0x8001051500000000ULL, 0x80010515000000E0ULL,
442 /* Write data */
443 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
444 /* Set address */
1da177e4 445 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
446 /* Write data */
447 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
448 END_SIGN
449};
450
20346722 451/*
1da177e4
LT
452 * Constants for Fixing the MacAddress problem seen mostly on
453 * Alpha machines.
454 */
f71e1309 455static const u64 fix_mac[] = {
1da177e4
LT
456 0x0060000000000000ULL, 0x0060600000000000ULL,
457 0x0040600000000000ULL, 0x0000600000000000ULL,
458 0x0020600000000000ULL, 0x0060600000000000ULL,
459 0x0020600000000000ULL, 0x0060600000000000ULL,
460 0x0020600000000000ULL, 0x0060600000000000ULL,
461 0x0020600000000000ULL, 0x0060600000000000ULL,
462 0x0020600000000000ULL, 0x0060600000000000ULL,
463 0x0020600000000000ULL, 0x0060600000000000ULL,
464 0x0020600000000000ULL, 0x0060600000000000ULL,
465 0x0020600000000000ULL, 0x0060600000000000ULL,
466 0x0020600000000000ULL, 0x0060600000000000ULL,
467 0x0020600000000000ULL, 0x0060600000000000ULL,
468 0x0020600000000000ULL, 0x0000600000000000ULL,
469 0x0040600000000000ULL, 0x0060600000000000ULL,
470 END_SIGN
471};
472
b41477f3
AR
473MODULE_LICENSE("GPL");
474MODULE_VERSION(DRV_VERSION);
475
476
1da177e4 477/* Module Loadable parameters. */
6cfc482b 478S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 479S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 480S2IO_PARM_INT(multiq, 0);
b41477f3
AR
481S2IO_PARM_INT(rx_ring_mode, 1);
482S2IO_PARM_INT(use_continuous_tx_intrs, 1);
483S2IO_PARM_INT(rmac_pause_time, 0x100);
484S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
485S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
486S2IO_PARM_INT(shared_splits, 0);
487S2IO_PARM_INT(tmac_util_period, 5);
488S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 489S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
490/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
491S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 492/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 493S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 494/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 495S2IO_PARM_INT(intr_type, 2);
7d3d0439 496/* Large receive offload feature */
43b7c451 497
7d3d0439
RA
498/* Max pkts to be aggregated by LRO at one time. If not specified,
499 * aggregation happens until we hit max IP pkt size(64K)
500 */
b41477f3 501S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 502S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
503
504S2IO_PARM_INT(napi, 1);
505S2IO_PARM_INT(ufo, 0);
926930b2 506S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
507
508static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 509{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 510static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 511{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 512static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 513{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
514
515module_param_array(tx_fifo_len, uint, NULL, 0);
516module_param_array(rx_ring_sz, uint, NULL, 0);
517module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 518
20346722 519/*
1da177e4 520 * S2IO device table.
20346722 521 * This table lists all the devices that this driver supports.
1da177e4 522 */
a3aa1884 523static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
1da177e4
LT
524 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
525 PCI_ANY_ID, PCI_ANY_ID},
526 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
527 PCI_ANY_ID, PCI_ANY_ID},
528 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
529 PCI_ANY_ID, PCI_ANY_ID},
530 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
531 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
532 {0,}
533};
534
535MODULE_DEVICE_TABLE(pci, s2io_tbl);
536
d796fdb7
LV
537static struct pci_error_handlers s2io_err_handler = {
538 .error_detected = s2io_io_error_detected,
539 .slot_reset = s2io_io_slot_reset,
540 .resume = s2io_io_resume,
541};
542
1da177e4 543static struct pci_driver s2io_driver = {
d44570e4
JP
544 .name = "S2IO",
545 .id_table = s2io_tbl,
546 .probe = s2io_init_nic,
547 .remove = __devexit_p(s2io_rem_nic),
548 .err_handler = &s2io_err_handler,
1da177e4
LT
549};
550
551/* A simplifier macro used both by init and free shared_mem Fns(). */
552#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
553
3a3d5756
SH
554/* netqueue manipulation helper functions */
555static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
556{
fd2ea0a7
DM
557 if (!sp->config.multiq) {
558 int i;
559
3a3d5756
SH
560 for (i = 0; i < sp->config.tx_fifo_num; i++)
561 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 562 }
fd2ea0a7 563 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
564}
565
566static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
567{
fd2ea0a7 568 if (!sp->config.multiq)
3a3d5756
SH
569 sp->mac_control.fifos[fifo_no].queue_state =
570 FIFO_QUEUE_STOP;
fd2ea0a7
DM
571
572 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
573}
574
575static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
576{
fd2ea0a7
DM
577 if (!sp->config.multiq) {
578 int i;
579
3a3d5756
SH
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 582 }
fd2ea0a7 583 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
584}
585
586static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
587{
fd2ea0a7 588 if (!sp->config.multiq)
3a3d5756
SH
589 sp->mac_control.fifos[fifo_no].queue_state =
590 FIFO_QUEUE_START;
fd2ea0a7
DM
591
592 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
593}
594
595static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
596{
fd2ea0a7
DM
597 if (!sp->config.multiq) {
598 int i;
599
3a3d5756
SH
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 602 }
fd2ea0a7 603 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
604}
605
606static inline void s2io_wake_tx_queue(
607 struct fifo_info *fifo, int cnt, u8 multiq)
608{
609
3a3d5756
SH
610 if (multiq) {
611 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
612 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 613 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
614 if (netif_queue_stopped(fifo->dev)) {
615 fifo->queue_state = FIFO_QUEUE_START;
616 netif_wake_queue(fifo->dev);
617 }
618 }
619}
620
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or a negative errno / FAILURE on bad configuration.
 * On any allocation failure the caller is expected to invoke
 * free_shared_mem() to release whatever was allocated so far.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running byte count for sw stats */

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Validate each fifo length individually. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* TxD lists are packed into pages: lst_per_page lists per PAGE_SIZE. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-fifo bookkeeping array, one list_info_hold per TxD list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Allocate DMA-coherent pages and carve them into TxD lists. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Record virt/phys address of every list in this page. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo UFO in-band signature array (one u64 per TxD). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* One RxD per block is consumed by the block-chaining entry. */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	/* Descriptor size depends on 1-buffer vs 3-buffer rx mode. */
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			/* Host-side per-RxD address table for this block. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Last block links back to the first, forming a ring. */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE and keep
					 * the original pointer (ba_*_org) for
					 * freeing; ba_* is the aligned address
					 * (ALIGN_SIZE is used as a mask here).
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here is the last Rx block's DMA address
	 * (or uninitialized if no Rx blocks were allocated) — debug print only.
	 */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
927
20346722
K
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Every release is mirrored in swstats->mem_freed so the software
 * statistics stay in step with the allocation accounting done at init time.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	/* TxDL bookkeeping mirrors init: lst_per_page descriptor lists
	 * were packed into each PAGE_SIZE DMA allocation.
	 */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* list_info was never allocated for this fifo;
			 * nothing further can have been allocated either.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks the first page that was
			 * never allocated; later pages are NULL too.
			 */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx block ring DMA areas and their rxd shadow arrays */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				/* Block's buffAdd array may be missing if
				 * allocation failed part-way through.
				 */
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* _org pointers are the unaligned
					 * kmalloc originals; ba_0/ba_1 are
					 * aligned views into them.
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Per-fifo UFO in-band buffers (allocated only when used) */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Statistics block last: swstats itself lives inside it, so the
	 * final accounting update must precede the free.
	 */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1069
541ae68f
K
1070/**
1071 * s2io_verify_pci_mode -
1072 */
1073
1ee6dd77 1074static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1075{
1ee6dd77 1076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1077 register u64 val64 = 0;
1078 int mode;
1079
1080 val64 = readq(&bar0->pci_mode);
1081 mode = (u8)GET_PCI_MODE(val64);
1082
d44570e4 1083 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1084 return -1; /* Unknown PCI mode */
1085 return mode;
1086}
1087
c92ca04b
AR
1088#define NEC_VENID 0x1033
1089#define NEC_DEVID 0x0125
1090static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1091{
1092 struct pci_dev *tdev = NULL;
26d36b64
AC
1093 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1094 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1095 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1096 pci_dev_put(tdev);
c92ca04b 1097 return 1;
7ad62dbc 1098 }
c92ca04b
AR
1099 }
1100 }
1101 return 0;
1102}
541ae68f 1103
/* Bus speeds in MHz, indexed by the mode value decoded by GET_PCI_MODE() */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1105/**
1106 * s2io_print_pci_mode -
1107 */
1ee6dd77 1108static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1109{
1ee6dd77 1110 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1111 register u64 val64 = 0;
1112 int mode;
1113 struct config_param *config = &nic->config;
9e39f7c5 1114 const char *pcimode;
541ae68f
K
1115
1116 val64 = readq(&bar0->pci_mode);
1117 mode = (u8)GET_PCI_MODE(val64);
1118
d44570e4 1119 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1120 return -1; /* Unknown PCI mode */
1121
c92ca04b
AR
1122 config->bus_speed = bus_speed[mode];
1123
1124 if (s2io_on_nec_bridge(nic->pdev)) {
1125 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1126 nic->dev->name);
c92ca04b
AR
1127 return mode;
1128 }
1129
d44570e4
JP
1130 switch (mode) {
1131 case PCI_MODE_PCI_33:
9e39f7c5 1132 pcimode = "33MHz PCI bus";
d44570e4
JP
1133 break;
1134 case PCI_MODE_PCI_66:
9e39f7c5 1135 pcimode = "66MHz PCI bus";
d44570e4
JP
1136 break;
1137 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1138 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1139 break;
1140 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1141 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1142 break;
1143 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1144 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1145 break;
1146 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1147 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1148 break;
1149 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1150 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1151 break;
1152 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1153 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1154 break;
1155 default:
9e39f7c5
JP
1156 pcimode = "unsupported bus!";
1157 mode = -1;
541ae68f
K
1158 }
1159
9e39f7c5
JP
1160 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1161 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1162
541ae68f
K
1163 return mode;
1164}
1165
b7c5678f
RV
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * by programming the TTI_DATA1/TTI_DATA2 memories for every Tx fifo
 * and committing each entry through the TTI command register.
 * Return Value:  SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer to the measured bus speed so the
			 * interrupt rate stays roughly constant on Xframe II.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only when the link
		 * is up and the module parameter allows it.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* Under default Tx steering the UDP fifos get lower
			 * utilization-based coalescing thresholds.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit this fifo's entry; the strobe bit clears when the
		 * hardware has accepted the command.
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1242
20346722
K
1243/**
1244 * init_nic - Initialization of hardware
b7c5678f 1245 * @nic: device private variable
20346722
K
1246 * Description: The function sequentially configures every block
1247 * of the H/W from their reset values.
1248 * Return Value: SUCCESS on success and
1da177e4
LT
1249 * '-1' on failure (endian settings incorrect).
1250 */
1251
1252static int init_nic(struct s2io_nic *nic)
1253{
1ee6dd77 1254 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1255 struct net_device *dev = nic->dev;
1256 register u64 val64 = 0;
1257 void __iomem *add;
1258 u32 time;
1259 int i, j;
c92ca04b 1260 int dtx_cnt = 0;
1da177e4 1261 unsigned long long mem_share;
20346722 1262 int mem_size;
ffb5df6c
JP
1263 struct config_param *config = &nic->config;
1264 struct mac_info *mac_control = &nic->mac_control;
1da177e4 1265
5e25b9dd 1266 /* to set the swapper controle on the card */
d44570e4
JP
1267 if (s2io_set_swapper(nic)) {
1268 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
9f74ffde 1269 return -EIO;
1da177e4
LT
1270 }
1271
541ae68f
K
1272 /*
1273 * Herc requires EOI to be removed from reset before XGXS, so..
1274 */
1275 if (nic->device_type & XFRAME_II_DEVICE) {
1276 val64 = 0xA500000000ULL;
1277 writeq(val64, &bar0->sw_reset);
1278 msleep(500);
1279 val64 = readq(&bar0->sw_reset);
1280 }
1281
1da177e4
LT
1282 /* Remove XGXS from reset state */
1283 val64 = 0;
1284 writeq(val64, &bar0->sw_reset);
1da177e4 1285 msleep(500);
20346722 1286 val64 = readq(&bar0->sw_reset);
1da177e4 1287
7962024e
SH
1288 /* Ensure that it's safe to access registers by checking
1289 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1290 */
1291 if (nic->device_type == XFRAME_II_DEVICE) {
1292 for (i = 0; i < 50; i++) {
1293 val64 = readq(&bar0->adapter_status);
1294 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1295 break;
1296 msleep(10);
1297 }
1298 if (i == 50)
1299 return -ENODEV;
1300 }
1301
1da177e4
LT
1302 /* Enable Receiving broadcasts */
1303 add = &bar0->mac_cfg;
1304 val64 = readq(&bar0->mac_cfg);
1305 val64 |= MAC_RMAC_BCAST_ENABLE;
1306 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 1307 writel((u32)val64, add);
1da177e4
LT
1308 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1309 writel((u32) (val64 >> 32), (add + 4));
1310
1311 /* Read registers in all blocks */
1312 val64 = readq(&bar0->mac_int_mask);
1313 val64 = readq(&bar0->mc_int_mask);
1314 val64 = readq(&bar0->xgxs_int_mask);
1315
1316 /* Set MTU */
1317 val64 = dev->mtu;
1318 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1319
541ae68f
K
1320 if (nic->device_type & XFRAME_II_DEVICE) {
1321 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1322 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1323 &bar0->dtx_control, UF);
541ae68f
K
1324 if (dtx_cnt & 0x1)
1325 msleep(1); /* Necessary!! */
1da177e4
LT
1326 dtx_cnt++;
1327 }
541ae68f 1328 } else {
c92ca04b
AR
1329 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1330 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1331 &bar0->dtx_control, UF);
1332 val64 = readq(&bar0->dtx_control);
1333 dtx_cnt++;
1da177e4
LT
1334 }
1335 }
1336
1337 /* Tx DMA Initialization */
1338 val64 = 0;
1339 writeq(val64, &bar0->tx_fifo_partition_0);
1340 writeq(val64, &bar0->tx_fifo_partition_1);
1341 writeq(val64, &bar0->tx_fifo_partition_2);
1342 writeq(val64, &bar0->tx_fifo_partition_3);
1343
1da177e4 1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
1345 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1346
1347 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1348 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1da177e4
LT
1349
1350 if (i == (config->tx_fifo_num - 1)) {
1351 if (i % 2 == 0)
1352 i++;
1353 }
1354
1355 switch (i) {
1356 case 1:
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1358 val64 = 0;
b7c5678f 1359 j = 0;
1da177e4
LT
1360 break;
1361 case 3:
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1363 val64 = 0;
b7c5678f 1364 j = 0;
1da177e4
LT
1365 break;
1366 case 5:
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1368 val64 = 0;
b7c5678f 1369 j = 0;
1da177e4
LT
1370 break;
1371 case 7:
1372 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1373 val64 = 0;
1374 j = 0;
1375 break;
1376 default:
1377 j++;
1da177e4
LT
1378 break;
1379 }
1380 }
1381
5e25b9dd
K
1382 /*
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1385 */
d44570e4 1386 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
5e25b9dd
K
1387 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1388
1da177e4
LT
1389 val64 = readq(&bar0->tx_fifo_partition_0);
1390 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
d44570e4 1391 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1da177e4 1392
20346722
K
1393 /*
1394 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1395 * integrity checking.
1396 */
1397 val64 = readq(&bar0->tx_pa_cfg);
d44570e4
JP
1398 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1399 TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL |
1401 TX_PA_CFG_IGNORE_L2_ERR;
1da177e4
LT
1402 writeq(val64, &bar0->tx_pa_cfg);
1403
1404 /* Rx DMA intialization. */
1405 val64 = 0;
1406 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1407 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1408
1409 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1da177e4
LT
1410 }
1411 writeq(val64, &bar0->rx_queue_priority);
1412
20346722
K
1413 /*
1414 * Allocating equal share of memory to all the
1da177e4
LT
1415 * configured Rings.
1416 */
1417 val64 = 0;
541ae68f
K
1418 if (nic->device_type & XFRAME_II_DEVICE)
1419 mem_size = 32;
1420 else
1421 mem_size = 64;
1422
1da177e4
LT
1423 for (i = 0; i < config->rx_ring_num; i++) {
1424 switch (i) {
1425 case 0:
20346722
K
1426 mem_share = (mem_size / config->rx_ring_num +
1427 mem_size % config->rx_ring_num);
1da177e4
LT
1428 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1429 continue;
1430 case 1:
20346722 1431 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1432 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1433 continue;
1434 case 2:
20346722 1435 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1436 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1437 continue;
1438 case 3:
20346722 1439 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1440 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1441 continue;
1442 case 4:
20346722 1443 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1444 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1445 continue;
1446 case 5:
20346722 1447 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1448 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1449 continue;
1450 case 6:
20346722 1451 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1452 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1453 continue;
1454 case 7:
20346722 1455 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1456 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1457 continue;
1458 }
1459 }
1460 writeq(val64, &bar0->rx_queue_cfg);
1461
20346722 1462 /*
5e25b9dd 1463 * Filling Tx round robin registers
b7c5678f 1464 * as per the number of FIFOs for equal scheduling priority
1da177e4 1465 */
5e25b9dd
K
1466 switch (config->tx_fifo_num) {
1467 case 1:
b7c5678f 1468 val64 = 0x0;
5e25b9dd
K
1469 writeq(val64, &bar0->tx_w_round_robin_0);
1470 writeq(val64, &bar0->tx_w_round_robin_1);
1471 writeq(val64, &bar0->tx_w_round_robin_2);
1472 writeq(val64, &bar0->tx_w_round_robin_3);
1473 writeq(val64, &bar0->tx_w_round_robin_4);
1474 break;
1475 case 2:
b7c5678f 1476 val64 = 0x0001000100010001ULL;
5e25b9dd 1477 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1480 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1481 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1482 writeq(val64, &bar0->tx_w_round_robin_4);
1483 break;
1484 case 3:
b7c5678f 1485 val64 = 0x0001020001020001ULL;
5e25b9dd 1486 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1487 val64 = 0x0200010200010200ULL;
5e25b9dd 1488 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1489 val64 = 0x0102000102000102ULL;
5e25b9dd 1490 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1491 val64 = 0x0001020001020001ULL;
5e25b9dd 1492 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1493 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1494 writeq(val64, &bar0->tx_w_round_robin_4);
1495 break;
1496 case 4:
b7c5678f 1497 val64 = 0x0001020300010203ULL;
5e25b9dd 1498 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1500 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1501 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1502 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1503 writeq(val64, &bar0->tx_w_round_robin_4);
1504 break;
1505 case 5:
b7c5678f 1506 val64 = 0x0001020304000102ULL;
5e25b9dd 1507 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1508 val64 = 0x0304000102030400ULL;
5e25b9dd 1509 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1510 val64 = 0x0102030400010203ULL;
5e25b9dd 1511 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1512 val64 = 0x0400010203040001ULL;
5e25b9dd 1513 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1514 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1515 writeq(val64, &bar0->tx_w_round_robin_4);
1516 break;
1517 case 6:
b7c5678f 1518 val64 = 0x0001020304050001ULL;
5e25b9dd 1519 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1520 val64 = 0x0203040500010203ULL;
5e25b9dd 1521 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1522 val64 = 0x0405000102030405ULL;
5e25b9dd 1523 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1524 val64 = 0x0001020304050001ULL;
5e25b9dd 1525 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1526 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1527 writeq(val64, &bar0->tx_w_round_robin_4);
1528 break;
1529 case 7:
b7c5678f 1530 val64 = 0x0001020304050600ULL;
5e25b9dd 1531 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1532 val64 = 0x0102030405060001ULL;
5e25b9dd 1533 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1534 val64 = 0x0203040506000102ULL;
5e25b9dd 1535 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1536 val64 = 0x0304050600010203ULL;
5e25b9dd 1537 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1538 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1539 writeq(val64, &bar0->tx_w_round_robin_4);
1540 break;
1541 case 8:
b7c5678f 1542 val64 = 0x0001020304050607ULL;
5e25b9dd 1543 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1545 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1546 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1547 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1548 writeq(val64, &bar0->tx_w_round_robin_4);
1549 break;
1550 }
1551
b41477f3 1552 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1553 val64 = readq(&bar0->tx_fifo_partition_0);
1554 val64 |= (TX_FIFO_PARTITION_EN);
1555 writeq(val64, &bar0->tx_fifo_partition_0);
1556
5e25b9dd 1557 /* Filling the Rx round robin registers as per the
0425b46a
SH
1558 * number of Rings and steering based on QoS with
1559 * equal priority.
1560 */
5e25b9dd
K
1561 switch (config->rx_ring_num) {
1562 case 1:
0425b46a
SH
1563 val64 = 0x0;
1564 writeq(val64, &bar0->rx_w_round_robin_0);
1565 writeq(val64, &bar0->rx_w_round_robin_1);
1566 writeq(val64, &bar0->rx_w_round_robin_2);
1567 writeq(val64, &bar0->rx_w_round_robin_3);
1568 writeq(val64, &bar0->rx_w_round_robin_4);
1569
5e25b9dd
K
1570 val64 = 0x8080808080808080ULL;
1571 writeq(val64, &bar0->rts_qos_steering);
1572 break;
1573 case 2:
0425b46a 1574 val64 = 0x0001000100010001ULL;
5e25b9dd 1575 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1576 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1577 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1578 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1579 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1580 writeq(val64, &bar0->rx_w_round_robin_4);
1581
1582 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering);
1584 break;
1585 case 3:
0425b46a 1586 val64 = 0x0001020001020001ULL;
5e25b9dd 1587 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1588 val64 = 0x0200010200010200ULL;
5e25b9dd 1589 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1590 val64 = 0x0102000102000102ULL;
5e25b9dd 1591 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1592 val64 = 0x0001020001020001ULL;
5e25b9dd 1593 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1594 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1595 writeq(val64, &bar0->rx_w_round_robin_4);
1596
1597 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering);
1599 break;
1600 case 4:
0425b46a 1601 val64 = 0x0001020300010203ULL;
5e25b9dd 1602 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1603 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1604 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1605 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1606 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1607 writeq(val64, &bar0->rx_w_round_robin_4);
1608
1609 val64 = 0x8080404020201010ULL;
1610 writeq(val64, &bar0->rts_qos_steering);
1611 break;
1612 case 5:
0425b46a 1613 val64 = 0x0001020304000102ULL;
5e25b9dd 1614 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1615 val64 = 0x0304000102030400ULL;
5e25b9dd 1616 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1617 val64 = 0x0102030400010203ULL;
5e25b9dd 1618 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1619 val64 = 0x0400010203040001ULL;
5e25b9dd 1620 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1621 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1622 writeq(val64, &bar0->rx_w_round_robin_4);
1623
1624 val64 = 0x8080404020201008ULL;
1625 writeq(val64, &bar0->rts_qos_steering);
1626 break;
1627 case 6:
0425b46a 1628 val64 = 0x0001020304050001ULL;
5e25b9dd 1629 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1630 val64 = 0x0203040500010203ULL;
5e25b9dd 1631 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1632 val64 = 0x0405000102030405ULL;
5e25b9dd 1633 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1634 val64 = 0x0001020304050001ULL;
5e25b9dd 1635 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1636 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1637 writeq(val64, &bar0->rx_w_round_robin_4);
1638
1639 val64 = 0x8080404020100804ULL;
1640 writeq(val64, &bar0->rts_qos_steering);
1641 break;
1642 case 7:
0425b46a 1643 val64 = 0x0001020304050600ULL;
5e25b9dd 1644 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1645 val64 = 0x0102030405060001ULL;
5e25b9dd 1646 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1647 val64 = 0x0203040506000102ULL;
5e25b9dd 1648 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1649 val64 = 0x0304050600010203ULL;
5e25b9dd 1650 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1651 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1652 writeq(val64, &bar0->rx_w_round_robin_4);
1653
1654 val64 = 0x8080402010080402ULL;
1655 writeq(val64, &bar0->rts_qos_steering);
1656 break;
1657 case 8:
0425b46a 1658 val64 = 0x0001020304050607ULL;
5e25b9dd 1659 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1660 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1661 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1662 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1663 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1664 writeq(val64, &bar0->rx_w_round_robin_4);
1665
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1668 break;
1669 }
1da177e4
LT
1670
1671 /* UDP Fix */
1672 val64 = 0;
20346722 1673 for (i = 0; i < 8; i++)
1da177e4
LT
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1675
5e25b9dd
K
1676 /* Set the default rts frame length for the rings configured */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1680
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1683 */
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that user not
1686 * specified frame length steering.
1687 * If the user provides the frame length then program
1688 * the rts_frm_len register for those values or else
1689 * leave it as it is.
1690 */
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
d44570e4 1693 &bar0->rts_frm_len_n[i]);
5e25b9dd
K
1694 }
1695 }
8a4bdbaa 1696
9fc93a41
SS
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
9e39f7c5
JP
1700 DBG_PRINT(ERR_DBG,
1701 "%s: rts_ds_steer failed on codepoint %d\n",
1702 dev->name, i);
9f74ffde 1703 return -ENODEV;
9fc93a41
SS
1704 }
1705 }
1706
20346722 1707 /* Program statistics memory */
1da177e4 1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1709
541ae68f
K
1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1713 }
1714
20346722 1715 /*
1da177e4
LT
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1718 */
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
d44570e4 1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1da177e4
LT
1721 writeq(val64, &bar0->mac_link_util);
1722
20346722
K
1723 /*
1724 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1725 * Scheme.
1726 */
1da177e4 1727
b7c5678f
RV
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1730 return -ENODEV;
1da177e4 1731
8a4bdbaa
SS
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1734 /*
8a4bdbaa
SS
1735 * Programmed to generate Apprx 500 Intrs per
1736 * second
1737 */
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1740 } else
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
d44570e4
JP
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1745 RTI_DATA1_MEM_RX_TIMER_AC_EN;
8a4bdbaa
SS
1746
1747 writeq(val64, &bar0->rti_data1_mem);
1748
1749 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1750 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1751 if (nic->config.intr_type == MSI_X)
d44570e4
JP
1752 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1753 RTI_DATA2_MEM_RX_UFC_D(0x40));
8a4bdbaa 1754 else
d44570e4
JP
1755 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1756 RTI_DATA2_MEM_RX_UFC_D(0x80));
8a4bdbaa 1757 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1758
8a4bdbaa 1759 for (i = 0; i < config->rx_ring_num; i++) {
d44570e4
JP
1760 val64 = RTI_CMD_MEM_WE |
1761 RTI_CMD_MEM_STROBE_NEW_CMD |
1762 RTI_CMD_MEM_OFFSET(i);
8a4bdbaa 1763 writeq(val64, &bar0->rti_command_mem);
1da177e4 1764
8a4bdbaa
SS
1765 /*
1766 * Once the operation completes, the Strobe bit of the
1767 * command register will be reset. We poll for this
1768 * particular condition. We wait for a maximum of 500ms
1769 * for the operation to complete, if it's not complete
1770 * by then we return error.
1771 */
1772 time = 0;
f957bcf0 1773 while (true) {
8a4bdbaa
SS
1774 val64 = readq(&bar0->rti_command_mem);
1775 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1776 break;
b6e3f982 1777
8a4bdbaa 1778 if (time > 10) {
9e39f7c5 1779 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
8a4bdbaa 1780 dev->name);
9f74ffde 1781 return -ENODEV;
b6e3f982 1782 }
8a4bdbaa
SS
1783 time++;
1784 msleep(50);
1da177e4 1785 }
1da177e4
LT
1786 }
1787
20346722
K
1788 /*
1789 * Initializing proper values as Pause threshold into all
1da177e4
LT
1790 * the 8 Queues on Rx side.
1791 */
1792 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1793 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1794
1795 /* Disable RMAC PAD STRIPPING */
509a2671 1796 add = &bar0->mac_cfg;
1da177e4
LT
1797 val64 = readq(&bar0->mac_cfg);
1798 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64), add);
1801 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1802 writel((u32) (val64 >> 32), (add + 4));
1803 val64 = readq(&bar0->mac_cfg);
1804
7d3d0439
RA
1805 /* Enable FCS stripping by adapter */
1806 add = &bar0->mac_cfg;
1807 val64 = readq(&bar0->mac_cfg);
1808 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1809 if (nic->device_type == XFRAME_II_DEVICE)
1810 writeq(val64, &bar0->mac_cfg);
1811 else {
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64), add);
1814 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1815 writel((u32) (val64 >> 32), (add + 4));
1816 }
1817
20346722
K
1818 /*
1819 * Set the time value to be inserted in the pause frame
1da177e4
LT
1820 * generated by xena.
1821 */
1822 val64 = readq(&bar0->rmac_pause_cfg);
1823 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1824 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1825 writeq(val64, &bar0->rmac_pause_cfg);
1826
20346722 1827 /*
1da177e4
LT
1828 * Set the Threshold Limit for Generating the pause frame
1829 * If the amount of data in any Queue exceeds ratio of
1830 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1831 * pause frame is generated
1832 */
1833 val64 = 0;
1834 for (i = 0; i < 4; i++) {
d44570e4
JP
1835 val64 |= (((u64)0xFF00 |
1836 nic->mac_control.mc_pause_threshold_q0q3)
1837 << (i * 2 * 8));
1da177e4
LT
1838 }
1839 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1840
1841 val64 = 0;
1842 for (i = 0; i < 4; i++) {
d44570e4
JP
1843 val64 |= (((u64)0xFF00 |
1844 nic->mac_control.mc_pause_threshold_q4q7)
1845 << (i * 2 * 8));
1da177e4
LT
1846 }
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1848
20346722
K
1849 /*
1850 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1851 * exceeded the limit pointed by shared_splits
1852 */
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1856
863c11a9
AR
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1861 }
1862
541ae68f
K
1863 /*
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1866 */
1867 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
b7b5a128 1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1873 writeq(val64, &bar0->pic_control2);
541ae68f 1874 }
c92ca04b
AR
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1878 }
1879
1da177e4
LT
1880 return SUCCESS;
1881}
a371a07d
K
1882#define LINK_UP_DOWN_INTERRUPT 1
1883#define MAC_RMAC_ERR_TIMER 2
1884
1ee6dd77 1885static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d
K
1886{
1887 if (nic->device_type == XFRAME_II_DEVICE)
1888 return LINK_UP_DOWN_INTERRUPT;
1889 else
1890 return MAC_RMAC_ERR_TIMER;
1891}
8116f3cf 1892
9caab458
SS
1893/**
1894 * do_s2io_write_bits - update alarm bits in alarm register
1895 * @value: alarm bits
1896 * @flag: interrupt status
1897 * @addr: address value
1898 * Description: update alarm bits in alarm register
1899 * Return Value:
1900 * NONE.
1901 */
1902static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1903{
1904 u64 temp64;
1905
1906 temp64 = readq(addr);
1907
d44570e4
JP
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
9caab458 1910 else
d44570e4 1911 temp64 |= ((u64)value);
9caab458
SS
1912 writeq(temp64, addr);
1913}
1da177e4 1914
43b7c451 1915static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1916{
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 gen_int_mask = 0;
01e16faa 1919 u64 interruptible;
9caab458 1920
01e16faa 1921 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
9caab458 1922 if (mask & TX_DMA_INTR) {
9caab458
SS
1923 gen_int_mask |= TXDMA_INT_M;
1924
1925 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
d44570e4
JP
1926 TXDMA_PCC_INT | TXDMA_TTI_INT |
1927 TXDMA_LSO_INT | TXDMA_TPA_INT |
1928 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
9caab458
SS
1929
1930 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
d44570e4
JP
1931 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1932 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1933 &bar0->pfc_err_mask);
9caab458
SS
1934
1935 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
d44570e4
JP
1936 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1937 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
9caab458
SS
1938
1939 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
d44570e4
JP
1940 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1941 PCC_N_SERR | PCC_6_COF_OV_ERR |
1942 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1943 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1944 PCC_TXB_ECC_SG_ERR,
1945 flag, &bar0->pcc_err_mask);
9caab458
SS
1946
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
d44570e4 1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
9caab458
SS
1949
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
d44570e4
JP
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
9caab458
SS
1954
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
d44570e4 1956 flag, &bar0->tpa_err_mask);
9caab458
SS
1957
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
9caab458
SS
1959 }
1960
1961 if (mask & TX_MAC_INTR) {
1962 gen_int_mask |= TXMAC_INT_M;
1963 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
d44570e4 1964 &bar0->mac_int_mask);
9caab458 1965 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
d44570e4
JP
1966 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1967 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1968 flag, &bar0->mac_tmac_err_mask);
9caab458
SS
1969 }
1970
1971 if (mask & TX_XGXS_INTR) {
1972 gen_int_mask |= TXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
d44570e4 1974 &bar0->xgxs_int_mask);
9caab458 1975 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
d44570e4
JP
1976 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1977 flag, &bar0->xgxs_txgxs_err_mask);
9caab458
SS
1978 }
1979
1980 if (mask & RX_DMA_INTR) {
1981 gen_int_mask |= RXDMA_INT_M;
1982 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
d44570e4
JP
1983 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1984 flag, &bar0->rxdma_int_mask);
9caab458 1985 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
d44570e4
JP
1986 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1987 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1988 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
9caab458 1989 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
d44570e4
JP
1990 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1991 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1992 &bar0->prc_pcix_err_mask);
9caab458 1993 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
d44570e4
JP
1994 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1995 &bar0->rpa_err_mask);
9caab458 1996 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
d44570e4
JP
1997 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1998 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1999 RDA_FRM_ECC_SG_ERR |
2000 RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
9caab458 2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
d44570e4
JP
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
9caab458
SS
2005 }
2006
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
d44570e4
JP
2010 &bar0->mac_int_mask);
2011 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR);
01e16faa
SH
2014 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2015 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2016 do_s2io_write_bits(interruptible,
d44570e4 2017 flag, &bar0->mac_rmac_err_mask);
9caab458
SS
2018 }
2019
d44570e4 2020 if (mask & RX_XGXS_INTR) {
9caab458
SS
2021 gen_int_mask |= RXXGXS_INT_M;
2022 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
d44570e4 2023 &bar0->xgxs_int_mask);
9caab458 2024 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
d44570e4 2025 &bar0->xgxs_rxgxs_err_mask);
9caab458
SS
2026 }
2027
2028 if (mask & MC_INTR) {
2029 gen_int_mask |= MC_INT_M;
d44570e4
JP
2030 do_s2io_write_bits(MC_INT_MASK_MC_INT,
2031 flag, &bar0->mc_int_mask);
9caab458 2032 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
d44570e4
JP
2033 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2034 &bar0->mc_err_mask);
9caab458
SS
2035 }
2036 nic->general_int_mask = gen_int_mask;
2037
2038 /* Remove this line when alarm interrupts are enabled */
2039 nic->general_int_mask = 0;
2040}
d44570e4 2041
20346722
K
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm bits accumulated by en_dis_err_alarms(). */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Apply the accumulated top-level mask: clear bits to enable,
	 * or mask everything when disabling. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask the hardware actually holds. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2131
19a60522
SS
2132/**
2133 * verify_pcc_quiescent- Checks for PCC quiescent state
2134 * Return: 1 If PCC is quiescence
2135 * 0 If PCC is not quiescence
2136 */
1ee6dd77 2137static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2138{
19a60522 2139 int ret = 0, herc;
1ee6dd77 2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2141 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2142
19a60522 2143 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2144
f957bcf0 2145 if (flag == false) {
44c10138 2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2148 ret = 1;
19a60522
SS
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2151 ret = 1;
20346722
K
2152 }
2153 } else {
44c10138 2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2157 ret = 1;
5e25b9dd
K
2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2161 ret = 1;
20346722
K
2162 }
2163 }
2164
2165 return ret;
2166}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private structure
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each check below tests one readiness bit of the adapter status
	 * register and logs which subsystem is not ready on failure. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2235
2236/**
2237 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2238 * @sp: Pointer to device specifc structure
20346722 2239 * Description :
1da177e4
LT
2240 * New procedure to clear mac address reading problems on Alpha platforms
2241 *
2242 */
2243
d44570e4 2244static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2245{
1ee6dd77 2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2247 int i = 0;
2248
2249 while (fix_mac[i] != END_SIGN) {
2250 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2251 udelay(10);
d83d282b 2252 (void) readq(&bar0->gpio_control);
1da177e4
LT
2253 }
2254}
2255
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the ring's PRC at its first Rx block. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honor the module parameter that turns off VLAN tag stripping. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Raw register offset 0x2700 — undocumented here; see the
		 * SXE-002 workaround referenced above. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list to unwind
 * @get_off: descriptor offset (currently unused in this body)
 * Description: unmaps every DMA buffer referenced by the descriptor
 * list, zeroes the descriptors, and returns the skb that was queued
 * on them (or NULL if none was).
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A leading UFO in-band descriptor carries a u64 header, not
	 * skb data; unmap it and step past it. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		/* Nothing queued: just clear the whole descriptor list. */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb. */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One descriptor per page fragment; a NULL buffer pointer
		 * marks the end of the mapped fragments. */
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2413
20346722
K
2414/**
2415 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2416 * @nic : device private variable.
20346722 2417 * Description:
1da177e4 2418 * Free all queued Tx buffers.
20346722 2419 * Return Value: void
d44570e4 2420 */
1da177e4
LT
2421
2422static void free_tx_buffers(struct s2io_nic *nic)
2423{
2424 struct net_device *dev = nic->dev;
2425 struct sk_buff *skb;
1ee6dd77 2426 struct TxD *txdp;
1da177e4 2427 int i, j;
fed5eccd 2428 int cnt = 0;
ffb5df6c
JP
2429 struct config_param *config = &nic->config;
2430 struct mac_info *mac_control = &nic->mac_control;
2431 struct stat_block *stats = mac_control->stats_info;
2432 struct swStat *swstats = &stats->sw_stat;
1da177e4
LT
2433
2434 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
2435 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2436 struct fifo_info *fifo = &mac_control->fifos[i];
2fda096d 2437 unsigned long flags;
13d866a9
JP
2438
2439 spin_lock_irqsave(&fifo->tx_lock, flags);
2440 for (j = 0; j < tx_cfg->fifo_len; j++) {
2441 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
fed5eccd
AR
2442 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2443 if (skb) {
ffb5df6c 2444 swstats->mem_freed += skb->truesize;
fed5eccd
AR
2445 dev_kfree_skb(skb);
2446 cnt++;
1da177e4 2447 }
1da177e4
LT
2448 }
2449 DBG_PRINT(INTR_DBG,
9e39f7c5 2450 "%s: forcibly freeing %d skbs on FIFO%d\n",
1da177e4 2451 dev->name, cnt, i);
13d866a9
JP
2452 fifo->tx_curr_get_info.offset = 0;
2453 fifo->tx_curr_put_info.offset = 0;
2454 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4
LT
2455 }
2456}
2457
20346722
K
2458/**
2459 * stop_nic - To stop the nic
1da177e4 2460 * @nic ; device private variable.
20346722
K
2461 * Description:
2462 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2463 * function does. This function is called to stop the device.
2464 * Return Value:
2465 * void.
2466 */
2467
2468static void stop_nic(struct s2io_nic *nic)
2469{
1ee6dd77 2470 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2471 register u64 val64 = 0;
5d3213cc 2472 u16 interruptible;
1da177e4
LT
2473
2474 /* Disable all interrupts */
9caab458 2475 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2476 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2477 interruptible |= TX_PIC_INTR;
1da177e4
LT
2478 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2479
5d3213cc
AR
2480 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2481 val64 = readq(&bar0->adapter_control);
2482 val64 &= ~(ADAPTER_CNTL_EN);
2483 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2484}
2485
20346722
K
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 * the dma address for buf0 and buf1 to give it to the card.
 * Else we will sync the already mapped buffer to give it to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors we need to replenish. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Linear descriptor index; computed but not consumed in
		 * this body — presumably kept for debugging. */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer caught up with get pointer on a still-owned
		 * descriptor: the ring is full, stop refilling. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block reached: wrap to the next Rx block. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter: nothing to do. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand any batched descriptors to the adapter
			 * before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE+1
			 * boundary (tmp &= ~ALIGN_SIZE after adding). */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Unwind the Buffer2 mapping
						 * made just above. */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Hand descriptors to the adapter immediately except at
		 * rxsync_frequency boundaries, where ownership transfer is
		 * batched behind a write barrier (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2722
da6971d8
AR
/**
 * free_rxd_blk - free all skbs and DMA mappings of one Rx descriptor block
 * @sp: device private structure.
 * @ring_no: index of the ring the block belongs to.
 * @blk: index of the block inside that ring.
 *
 * Walks every RxD in the block; for each descriptor that still owns an
 * skb (Host_Control != 0) it unmaps the DMA buffers according to the
 * receive descriptor mode, clears the descriptor, frees the skb and
 * decrements the ring's buffer count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;	/* descriptor holds no buffer */
		if (sp->rxd_mode == RXD_MODE_1) {
			/* 1-buffer mode: single mapping covering MTU + headers */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: unmap all three buffers separately */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2771
1da177e4 2772/**
20346722 2773 * free_rx_buffers - Frees all Rx buffers
1da177e4 2774 * @sp: device private variable.
20346722 2775 * Description:
1da177e4
LT
2776 * This function will free all Rx buffers allocated by host.
2777 * Return Value:
2778 * NONE.
2779 */
2780
2781static void free_rx_buffers(struct s2io_nic *sp)
2782{
2783 struct net_device *dev = sp->dev;
da6971d8 2784 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2785 struct config_param *config = &sp->config;
2786 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2787
2788 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2789 struct ring_info *ring = &mac_control->rings[i];
2790
da6971d8 2791 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2792 free_rxd_blk(sp, i, blk);
1da177e4 2793
13d866a9
JP
2794 ring->rx_curr_put_info.block_index = 0;
2795 ring->rx_curr_get_info.block_index = 0;
2796 ring->rx_curr_put_info.offset = 0;
2797 ring->rx_curr_get_info.offset = 0;
2798 ring->rx_bufs_left = 0;
9e39f7c5 2799 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2800 dev->name, buf_cnt, i);
2801 }
2802}
2803
8d8bb39b 2804static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2805{
8d8bb39b 2806 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2807 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2808 ring->dev->name);
f61e0a35
SH
2809 }
2810 return 0;
2811}
2812
/**
 * s2io_poll_msix - Rx interrupt handler for NAPI support (MSI-X, per ring)
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed; the ring's MSI-X vector is re-enabled only
 * when fewer packets than the budget were processed.
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		/* budget not exhausted: done polling, re-arm the interrupt */
		napi_complete(napi);
		/*Re Enable MSI-Rx Vector*/
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;	/* per-ring mask byte */
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write */
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2854
f61e0a35
SH
/**
 * s2io_poll_inta - NAPI poll handler for legacy (INTA) interrupt mode
 * @napi: napi context embedded in the device private structure.
 * @budget: maximum number of packets to process in this pass.
 *
 * Iterates over all Rx rings, processing packets from each until the
 * budget is exhausted, then replenishes the ring buffers.  When fewer
 * packets than the budget were processed, polling is completed and the
 * device-wide Rx interrupt mask is cleared.
 * Return value: total number of packets processed.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		readl(&bar0->rx_traffic_mask);	/* flush posted write */
	}
	return pkts_processed;
}
20346722 2885
b41477f3 2886#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2887/**
b41477f3 2888 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2889 * @dev : pointer to the device structure.
2890 * Description:
b41477f3
AR
2891 * This function will be called by upper layer to check for events on the
2892 * interface in situations where interrupts are disabled. It is used for
2893 * specific in-kernel networking tasks, such as remote consoles and kernel
2894 * debugging over the network (example netdump in RedHat).
612eff0e 2895 */
612eff0e
BH
2896static void s2io_netpoll(struct net_device *dev)
2897{
4cf1653a 2898 struct s2io_nic *nic = netdev_priv(dev);
1ee6dd77 2899 struct XENA_dev_config __iomem *bar0 = nic->bar0;
b41477f3 2900 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
612eff0e 2901 int i;
ffb5df6c
JP
2902 struct config_param *config = &nic->config;
2903 struct mac_info *mac_control = &nic->mac_control;
612eff0e 2904
d796fdb7
LV
2905 if (pci_channel_offline(nic->pdev))
2906 return;
2907
612eff0e
BH
2908 disable_irq(dev->irq);
2909
612eff0e 2910 writeq(val64, &bar0->rx_traffic_int);
b41477f3
AR
2911 writeq(val64, &bar0->tx_traffic_int);
2912
6aa20a22 2913 /* we need to free up the transmitted skbufs or else netpoll will
b41477f3
AR
2914 * run out of skbs and will fail and eventually netpoll application such
2915 * as netdump will fail.
2916 */
2917 for (i = 0; i < config->tx_fifo_num; i++)
2918 tx_intr_handler(&mac_control->fifos[i]);
612eff0e 2919
b41477f3 2920 /* check for received packet and indicate up to network */
13d866a9
JP
2921 for (i = 0; i < config->rx_ring_num; i++) {
2922 struct ring_info *ring = &mac_control->rings[i];
2923
2924 rx_intr_handler(ring, 0);
2925 }
612eff0e
BH
2926
2927 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2928 struct ring_info *ring = &mac_control->rings[i];
2929
2930 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2931 DBG_PRINT(INFO_DBG,
2932 "%s: Out of memory in Rx Netpoll!!\n",
2933 dev->name);
612eff0e
BH
2934 break;
2935 }
2936 }
612eff0e 2937 enable_irq(dev->irq);
612eff0e
BH
2938}
2939#endif
2940
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: per ring structure.
 * @budget: budget for napi processing.
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh as yet un-processed frames,this function is
 * called. It picks out the RxD at which place the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* walk descriptors the hardware has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* descriptor without an skb indicates ring corruption */
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header) is reused, only synced; Buffer2
			 * (payload) is fully unmapped.
			 */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			/* end of block: wrap to the next block (circular) */
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
20346722
K
3047
3048/**
1da177e4
LT
3049 * tx_intr_handler - Transmit interrupt handler
3050 * @nic : device private variable
20346722
K
3051 * Description:
3052 * If an interrupt was raised to indicate DMA complete of the
3053 * Tx packet, this function is called. It identifies the last TxD
3054 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
3055 * DMA'ed into the NICs internal memory.
3056 * Return Value:
3057 * NONE
3058 */
3059
1ee6dd77 3060static void tx_intr_handler(struct fifo_info *fifo_data)
1da177e4 3061{
1ee6dd77 3062 struct s2io_nic *nic = fifo_data->nic;
1ee6dd77 3063 struct tx_curr_get_info get_info, put_info;
3a3d5756 3064 struct sk_buff *skb = NULL;
1ee6dd77 3065 struct TxD *txdlp;
3a3d5756 3066 int pkt_cnt = 0;
2fda096d 3067 unsigned long flags = 0;
f9046eb3 3068 u8 err_mask;
ffb5df6c
JP
3069 struct stat_block *stats = nic->mac_control.stats_info;
3070 struct swStat *swstats = &stats->sw_stat;
1da177e4 3071
2fda096d 3072 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
d44570e4 3073 return;
2fda096d 3074
20346722 3075 get_info = fifo_data->tx_curr_get_info;
1ee6dd77 3076 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
d44570e4
JP
3077 txdlp = (struct TxD *)
3078 fifo_data->list_info[get_info.offset].list_virt_addr;
20346722
K
3079 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3080 (get_info.offset != put_info.offset) &&
3081 (txdlp->Host_Control)) {
3082 /* Check for TxD errors */
3083 if (txdlp->Control_1 & TXD_T_CODE) {
3084 unsigned long long err;
3085 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0 3086 if (err & 0x1) {
ffb5df6c 3087 swstats->parity_err_cnt++;
bd1034f0 3088 }
491976b2
SH
3089
3090 /* update t_code statistics */
f9046eb3 3091 err_mask = err >> 48;
d44570e4
JP
3092 switch (err_mask) {
3093 case 2:
ffb5df6c 3094 swstats->tx_buf_abort_cnt++;
491976b2
SH
3095 break;
3096
d44570e4 3097 case 3:
ffb5df6c 3098 swstats->tx_desc_abort_cnt++;
491976b2
SH
3099 break;
3100
d44570e4 3101 case 7:
ffb5df6c 3102 swstats->tx_parity_err_cnt++;
491976b2
SH
3103 break;
3104
d44570e4 3105 case 10:
ffb5df6c 3106 swstats->tx_link_loss_cnt++;
491976b2
SH
3107 break;
3108
d44570e4 3109 case 15:
ffb5df6c 3110 swstats->tx_list_proc_err_cnt++;
491976b2 3111 break;
d44570e4 3112 }
20346722 3113 }
1da177e4 3114
fed5eccd 3115 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722 3116 if (skb == NULL) {
2fda096d 3117 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
9e39f7c5
JP
3118 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3119 __func__);
20346722
K
3120 return;
3121 }
3a3d5756 3122 pkt_cnt++;
20346722 3123
20346722 3124 /* Updating the statistics block */
ffb5df6c 3125 swstats->mem_freed += skb->truesize;
20346722
K
3126 dev_kfree_skb_irq(skb);
3127
3128 get_info.offset++;
863c11a9
AR
3129 if (get_info.offset == get_info.fifo_len + 1)
3130 get_info.offset = 0;
d44570e4
JP
3131 txdlp = (struct TxD *)
3132 fifo_data->list_info[get_info.offset].list_virt_addr;
3133 fifo_data->tx_curr_get_info.offset = get_info.offset;
1da177e4
LT
3134 }
3135
3a3d5756 3136 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
2fda096d
SR
3137
3138 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
1da177e4
LT
3139}
3140
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.  Each
 * transaction is an address phase, a write-data phase and a read-back
 * phase, each started via MDIO_CTRL_START_TRANS and followed by a
 * fixed 100us settle delay.
 * NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* read-back transaction to complete the write cycle */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3187
3188/**
3189 * s2io_mdio_read - Function to write in to MDIO registers
3190 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3191 * @addr : address value
3192 * @dev : pointer to net_device structure
3193 * Description:
3194 * This function is used to read values to the MDIO registers
3195 * NONE
3196 */
3197static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3198{
3199 u64 val64 = 0x0;
3200 u64 rval64 = 0x0;
4cf1653a 3201 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 3202 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3203
3204 /* address transaction */
d44570e4
JP
3205 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3206 | MDIO_MMD_DEV_ADDR(mmd_type)
3207 | MDIO_MMS_PRT_ADDR(0x0));
bd1034f0
AR
3208 writeq(val64, &bar0->mdio_control);
3209 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3210 writeq(val64, &bar0->mdio_control);
3211 udelay(100);
3212
3213 /* Data transaction */
d44570e4
JP
3214 val64 = MDIO_MMD_INDX_ADDR(addr) |
3215 MDIO_MMD_DEV_ADDR(mmd_type) |
3216 MDIO_MMS_PRT_ADDR(0x0) |
3217 MDIO_OP(MDIO_OP_READ_TRANS);
bd1034f0
AR
3218 writeq(val64, &bar0->mdio_control);
3219 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3220 writeq(val64, &bar0->mdio_control);
3221 udelay(100);
3222
3223 /* Read the value from regs */
3224 rval64 = readq(&bar0->mdio_control);
3225 rval64 = rval64 & 0xFFFF0000;
3226 rval64 = rval64 >> 16;
3227 return rval64;
3228}
d44570e4 3229
bd1034f0
AR
3230/**
3231 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3232 * @counter : counter value to be updated
bd1034f0
AR
3233 * @flag : flag to indicate the status
3234 * @type : counter type
3235 * Description:
3236 * This function is to check the status of the xpak counters value
3237 * NONE
3238 */
3239
d44570e4
JP
3240static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3241 u16 flag, u16 type)
bd1034f0
AR
3242{
3243 u64 mask = 0x3;
3244 u64 val64;
3245 int i;
d44570e4 3246 for (i = 0; i < index; i++)
bd1034f0
AR
3247 mask = mask << 0x2;
3248
d44570e4 3249 if (flag > 0) {
bd1034f0
AR
3250 *counter = *counter + 1;
3251 val64 = *regs_stat & mask;
3252 val64 = val64 >> (index * 0x2);
3253 val64 = val64 + 1;
d44570e4
JP
3254 if (val64 == 3) {
3255 switch (type) {
bd1034f0 3256 case 1:
9e39f7c5
JP
3257 DBG_PRINT(ERR_DBG,
3258 "Take Xframe NIC out of service.\n");
3259 DBG_PRINT(ERR_DBG,
3260"Excessive temperatures may result in premature transceiver failure.\n");
d44570e4 3261 break;
bd1034f0 3262 case 2:
9e39f7c5
JP
3263 DBG_PRINT(ERR_DBG,
3264 "Take Xframe NIC out of service.\n");
3265 DBG_PRINT(ERR_DBG,
3266"Excessive bias currents may indicate imminent laser diode failure.\n");
d44570e4 3267 break;
bd1034f0 3268 case 3:
9e39f7c5
JP
3269 DBG_PRINT(ERR_DBG,
3270 "Take Xframe NIC out of service.\n");
3271 DBG_PRINT(ERR_DBG,
3272"Excessive laser output power may saturate far-end receiver.\n");
d44570e4 3273 break;
bd1034f0 3274 default:
d44570e4
JP
3275 DBG_PRINT(ERR_DBG,
3276 "Incorrect XPAK Alarm type\n");
bd1034f0
AR
3277 }
3278 val64 = 0x0;
3279 }
3280 val64 = val64 << (index * 0x2);
3281 *regs_stat = (*regs_stat & (~mask)) | (val64);
3282
3283 } else {
3284 *regs_stat = *regs_stat & (~mask);
3285 }
3286}
3287
3288/**
3289 * s2io_updt_xpak_counter - Function to update the xpak counters
3290 * @dev : pointer to net_device struct
3291 * Description:
3292 * This function is to upate the status of the xpak counters value
3293 * NONE
3294 */
3295static void s2io_updt_xpak_counter(struct net_device *dev)
3296{
3297 u16 flag = 0x0;
3298 u16 type = 0x0;
3299 u16 val16 = 0x0;
3300 u64 val64 = 0x0;
3301 u64 addr = 0x0;
3302
4cf1653a 3303 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
3304 struct stat_block *stats = sp->mac_control.stats_info;
3305 struct xpakStat *xstats = &stats->xpak_stat;
bd1034f0
AR
3306
3307 /* Check the communication with the MDIO slave */
40239396 3308 addr = MDIO_CTRL1;
bd1034f0 3309 val64 = 0x0;
40239396 3310 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
d44570e4 3311 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
9e39f7c5
JP
3312 DBG_PRINT(ERR_DBG,
3313 "ERR: MDIO slave access failed - Returned %llx\n",
3314 (unsigned long long)val64);
bd1034f0
AR
3315 return;
3316 }
3317
40239396 3318 /* Check for the expected value of control reg 1 */
d44570e4 3319 if (val64 != MDIO_CTRL1_SPEED10G) {
9e39f7c5
JP
3320 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3321 "Returned: %llx- Expected: 0x%x\n",
40239396 3322 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
bd1034f0
AR
3323 return;
3324 }
3325
3326 /* Loading the DOM register to MDIO register */
3327 addr = 0xA100;
40239396
BH
3328 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3329 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0
AR
3330
3331 /* Reading the Alarm flags */
3332 addr = 0xA070;
3333 val64 = 0x0;
40239396 3334 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0
AR
3335
3336 flag = CHECKBIT(val64, 0x7);
3337 type = 1;
ffb5df6c
JP
3338 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3339 &xstats->xpak_regs_stat,
d44570e4 3340 0x0, flag, type);
bd1034f0 3341
d44570e4 3342 if (CHECKBIT(val64, 0x6))
ffb5df6c 3343 xstats->alarm_transceiver_temp_low++;
bd1034f0
AR
3344
3345 flag = CHECKBIT(val64, 0x3);
3346 type = 2;
ffb5df6c
JP
3347 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3348 &xstats->xpak_regs_stat,
d44570e4 3349 0x2, flag, type);
bd1034f0 3350
d44570e4 3351 if (CHECKBIT(val64, 0x2))
ffb5df6c 3352 xstats->alarm_laser_bias_current_low++;
bd1034f0
AR
3353
3354 flag = CHECKBIT(val64, 0x1);
3355 type = 3;
ffb5df6c
JP
3356 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3357 &xstats->xpak_regs_stat,
d44570e4 3358 0x4, flag, type);
bd1034f0 3359
d44570e4 3360 if (CHECKBIT(val64, 0x0))
ffb5df6c 3361 xstats->alarm_laser_output_power_low++;
bd1034f0
AR
3362
3363 /* Reading the Warning flags */
3364 addr = 0xA074;
3365 val64 = 0x0;
40239396 3366 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
bd1034f0 3367
d44570e4 3368 if (CHECKBIT(val64, 0x7))
ffb5df6c 3369 xstats->warn_transceiver_temp_high++;
bd1034f0 3370
d44570e4 3371 if (CHECKBIT(val64, 0x6))
ffb5df6c 3372 xstats->warn_transceiver_temp_low++;
bd1034f0 3373
d44570e4 3374 if (CHECKBIT(val64, 0x3))
ffb5df6c 3375 xstats->warn_laser_bias_current_high++;
bd1034f0 3376
d44570e4 3377 if (CHECKBIT(val64, 0x2))
ffb5df6c 3378 xstats->warn_laser_bias_current_low++;
bd1034f0 3379
d44570e4 3380 if (CHECKBIT(val64, 0x1))
ffb5df6c 3381 xstats->warn_laser_output_power_high++;
bd1034f0 3382
d44570e4 3383 if (CHECKBIT(val64, 0x0))
ffb5df6c 3384 xstats->warn_laser_output_power_low++;
bd1034f0
AR
3385}
3386
/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : register to poll.
 * @busy_bit : bit(s) within the register that signal completion.
 * @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 *              S2IO_BIT_SET to wait for it to become set.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
				 int bit_state)
{
	int ret = FAILURE, cnt = 0, delay = 1;
	u64 val64;

	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
		return FAILURE;

	do {
		val64 = readq(addr);
		if (bit_state == S2IO_BIT_RESET) {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		} else {
			if (val64 & busy_bit) {
				ret = SUCCESS;
				break;
			}
		}

		/* mdelay in atomic context, msleep otherwise */
		if (in_interrupt())
			mdelay(delay);
		else
			msleep(delay);

		/* back off: 10 polls at 1ms, then 10 more at 50ms */
		if (++cnt >= 10)
			delay = 50;
	} while (cnt < 20);
	return ret;
}
19a60522
SS
3431/*
3432 * check_pci_device_id - Checks if the device id is supported
3433 * @id : device id
3434 * Description: Function to check if the pci device id is supported by driver.
3435 * Return value: Actual device id if supported else PCI_ANY_ID
3436 */
3437static u16 check_pci_device_id(u16 id)
3438{
3439 switch (id) {
3440 case PCI_DEVICE_ID_HERC_WIN:
3441 case PCI_DEVICE_ID_HERC_UNI:
3442 return XFRAME_II_DEVICE;
3443 case PCI_DEVICE_ID_S2IO_UNI:
3444 case PCI_DEVICE_ID_S2IO_WIN:
3445 return XFRAME_I_DEVICE;
3446 default:
3447 return PCI_ANY_ID;
3448 }
3449}
1da177e4 3450
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 transceivers need extra settle time after reset */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* re-read the device id until the card answers again */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3573
/**
 * s2io_set_swapper - to set the swapper controle on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * The card exposes a feedback register (pif_rd_swapper_fb) that reads as
 * the known pattern 0x0123456789ABCDEF only when the read path is swapped
 * correctly; candidate swapper settings are tried until that pattern is
 * seen.  The write path is then verified the same way through the
 * xmsi_address scratch register.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic *sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                int i = 0;
                /* Candidate read-path swapper settings, tried in order:
                 * FE = full byte swap enable, SE = swap-on-size enable. */
                static const u64 value[] = {
                        0xC30000C3C30000C3ULL,  /* FE=1, SE=1 */
                        0x8100008181000081ULL,  /* FE=1, SE=0 */
                        0x4200004242000042ULL,  /* FE=0, SE=1 */
                        0               /* FE=0, SE=0 */
                };

                while (i < 4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        /* No setting produced the expected pattern. */
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
                                  "feedback read %llx\n",
                                  dev->name, (unsigned long long)val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                /* Read path already correct; keep the current setting. */
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Now verify the write path using xmsi_address as scratch space. */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if (val64 != valt) {
                int i = 0;
                /* Candidate write-path settings, OR-ed onto the read-path
                 * setting (valr) found above. */
                static const u64 value[] = {
                        0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                        0x0081810000818100ULL,  /* FE=1, SE=0 */
                        0x0042420000424200ULL,  /* FE=0, SE=1 */
                        0               /* FE=0, SE=0 */
                };

                while (i < 4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if (val64 == valt)
                                break;
                        i++;
                }
                if (i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG,
                                  "Write failed, Xmsi_addr reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the verified FE/SE bits (top 16), rebuild the rest. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                  SWAPPER_CTRL_TXP_SE |
                  SWAPPER_CTRL_TXD_R_FE |
                  SWAPPER_CTRL_TXD_W_FE |
                  SWAPPER_CTRL_TXF_R_FE |
                  SWAPPER_CTRL_RXD_R_FE |
                  SWAPPER_CTRL_RXD_W_FE |
                  SWAPPER_CTRL_RXF_W_FE |
                  SWAPPER_CTRL_XMSI_FE |
                  SWAPPER_CTRL_STATS_FE |
                  SWAPPER_CTRL_STATS_SE);
        /* XMSI swap-on-size only needed for INTA (see both branches). */
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                  SWAPPER_CTRL_TXP_SE |
                  SWAPPER_CTRL_TXD_R_FE |
                  SWAPPER_CTRL_TXD_R_SE |
                  SWAPPER_CTRL_TXD_W_FE |
                  SWAPPER_CTRL_TXD_W_SE |
                  SWAPPER_CTRL_TXF_R_FE |
                  SWAPPER_CTRL_RXD_R_FE |
                  SWAPPER_CTRL_RXD_R_SE |
                  SWAPPER_CTRL_RXD_W_FE |
                  SWAPPER_CTRL_RXD_W_SE |
                  SWAPPER_CTRL_RXF_W_FE |
                  SWAPPER_CTRL_XMSI_FE |
                  SWAPPER_CTRL_STATS_FE |
                  SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG,
                          "%s: Endian settings are wrong, feedback read %llx\n",
                          dev->name, (unsigned long long)val64);
                return FAILURE;
        }

        return SUCCESS;
}
3715
1ee6dd77 3716static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3717{
1ee6dd77 3718 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3719 u64 val64;
3720 int ret = 0, cnt = 0;
3721
3722 do {
3723 val64 = readq(&bar0->xmsi_access);
b7b5a128 3724 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3725 break;
3726 mdelay(1);
3727 cnt++;
d44570e4 3728 } while (cnt < 5);
cc6e7c44
RA
3729 if (cnt == 5) {
3730 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3731 ret = 1;
3732 }
3733
3734 return ret;
3735}
3736
1ee6dd77 3737static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3738{
1ee6dd77 3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3740 u64 val64;
f61e0a35
SH
3741 int i, msix_index;
3742
f61e0a35
SH
3743 if (nic->device_type == XFRAME_I_DEVICE)
3744 return;
cc6e7c44 3745
d44570e4
JP
3746 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3747 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
cc6e7c44
RA
3748 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3749 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
f61e0a35 3750 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3751 writeq(val64, &bar0->xmsi_access);
f61e0a35 3752 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3753 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3754 __func__, msix_index);
cc6e7c44
RA
3755 continue;
3756 }
3757 }
3758}
3759
1ee6dd77 3760static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3761{
1ee6dd77 3762 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3763 u64 val64, addr, data;
f61e0a35
SH
3764 int i, msix_index;
3765
3766 if (nic->device_type == XFRAME_I_DEVICE)
3767 return;
cc6e7c44
RA
3768
3769 /* Store and display */
d44570e4
JP
3770 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3771 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
f61e0a35 3772 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3773 writeq(val64, &bar0->xmsi_access);
f61e0a35 3774 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3775 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3776 __func__, msix_index);
cc6e7c44
RA
3777 continue;
3778 }
3779 addr = readq(&bar0->xmsi_address);
3780 data = readq(&bar0->xmsi_data);
3781 if (addr && data) {
3782 nic->msix_info[i].addr = addr;
3783 nic->msix_info[i].data = data;
3784 }
3785 }
3786}
3787
1ee6dd77 3788static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3789{
1ee6dd77 3790 struct XENA_dev_config __iomem *bar0 = nic->bar0;
ac731ab6 3791 u64 rx_mat;
cc6e7c44
RA
3792 u16 msi_control; /* Temp variable */
3793 int ret, i, j, msix_indx = 1;
4f870320 3794 int size;
ffb5df6c
JP
3795 struct stat_block *stats = nic->mac_control.stats_info;
3796 struct swStat *swstats = &stats->sw_stat;
cc6e7c44 3797
4f870320 3798 size = nic->num_entries * sizeof(struct msix_entry);
44364a03 3799 nic->entries = kzalloc(size, GFP_KERNEL);
bd684e43 3800 if (!nic->entries) {
d44570e4
JP
3801 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3802 __func__);
ffb5df6c 3803 swstats->mem_alloc_fail_cnt++;
cc6e7c44
RA
3804 return -ENOMEM;
3805 }
ffb5df6c 3806 swstats->mem_allocated += size;
f61e0a35 3807
4f870320 3808 size = nic->num_entries * sizeof(struct s2io_msix_entry);
44364a03 3809 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
bd684e43 3810 if (!nic->s2io_entries) {
8a4bdbaa 3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
d44570e4 3812 __func__);
ffb5df6c 3813 swstats->mem_alloc_fail_cnt++;
cc6e7c44 3814 kfree(nic->entries);
ffb5df6c 3815 swstats->mem_freed
f61e0a35 3816 += (nic->num_entries * sizeof(struct msix_entry));
cc6e7c44
RA
3817 return -ENOMEM;
3818 }
ffb5df6c 3819 swstats->mem_allocated += size;
cc6e7c44 3820
ac731ab6
SH
3821 nic->entries[0].entry = 0;
3822 nic->s2io_entries[0].entry = 0;
3823 nic->s2io_entries[0].in_use = MSIX_FLG;
3824 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3825 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3826
f61e0a35
SH
3827 for (i = 1; i < nic->num_entries; i++) {
3828 nic->entries[i].entry = ((i - 1) * 8) + 1;
3829 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
cc6e7c44
RA
3830 nic->s2io_entries[i].arg = NULL;
3831 nic->s2io_entries[i].in_use = 0;
3832 }
3833
8a4bdbaa 3834 rx_mat = readq(&bar0->rx_mat);
f61e0a35 3835 for (j = 0; j < nic->config.rx_ring_num; j++) {
8a4bdbaa 3836 rx_mat |= RX_MAT_SET(j, msix_indx);
f61e0a35
SH
3837 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3838 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3839 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3840 msix_indx += 8;
cc6e7c44 3841 }
8a4bdbaa 3842 writeq(rx_mat, &bar0->rx_mat);
f61e0a35 3843 readq(&bar0->rx_mat);
cc6e7c44 3844
f61e0a35 3845 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
c92ca04b 3846 /* We fail init if error or we get less vectors than min required */
cc6e7c44 3847 if (ret) {
9e39f7c5 3848 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
cc6e7c44 3849 kfree(nic->entries);
ffb5df6c
JP
3850 swstats->mem_freed += nic->num_entries *
3851 sizeof(struct msix_entry);
cc6e7c44 3852 kfree(nic->s2io_entries);
ffb5df6c
JP
3853 swstats->mem_freed += nic->num_entries *
3854 sizeof(struct s2io_msix_entry);
cc6e7c44
RA
3855 nic->entries = NULL;
3856 nic->s2io_entries = NULL;
3857 return -ENOMEM;
3858 }
3859
3860 /*
3861 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3862 * in the herc NIC. (Temp change, needs to be removed later)
3863 */
3864 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3865 msi_control |= 0x1; /* Enable MSI */
3866 pci_write_config_word(nic->pdev, 0x42, msi_control);
3867
3868 return 0;
3869}
3870
8abc4d5b 3871/* Handle software interrupt used during MSI(X) test */
33390a70 3872static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3873{
3874 struct s2io_nic *sp = dev_id;
3875
3876 sp->msi_detected = 1;
3877 wake_up(&sp->msi_wait);
3878
3879 return IRQ_HANDLED;
3880}
3881
/*
 * Test the interrupt path by forcing a software IRQ: arm the chip's
 * one-shot scheduled-interrupt timer routed to MSI vector 1 and wait
 * (up to HZ/10) for s2io_test_intr() to observe it.  Returns 0 when the
 * interrupt arrived, -EOPNOTSUPP when it did not (caller falls back to
 * INTx), or the request_irq() error.
 */
static int s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                          sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                          sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head(&sp->msi_wait);
        sp->msi_detected = 0;

        /* Save the register so it can be restored after the test. */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
                          "using MSI(X) during test\n",
                          sp->dev->name, pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
18b2b7bd
SH
3924
3925static void remove_msix_isr(struct s2io_nic *sp)
3926{
3927 int i;
3928 u16 msi_control;
3929
f61e0a35 3930 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3931 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3932 int vector = sp->entries[i].vector;
3933 void *arg = sp->s2io_entries[i].arg;
3934 free_irq(vector, arg);
3935 }
3936 }
3937
3938 kfree(sp->entries);
3939 kfree(sp->s2io_entries);
3940 sp->entries = NULL;
3941 sp->s2io_entries = NULL;
3942
3943 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3944 msi_control &= 0xFFFE; /* Disable MSI */
3945 pci_write_config_word(sp->pdev, 0x42, msi_control);
3946
3947 pci_disable_msix(sp->pdev);
3948}
3949
3950static void remove_inta_isr(struct s2io_nic *sp)
3951{
3952 struct net_device *dev = sp->dev;
3953
3954 free_irq(sp->pdev->irq, dev);
3955}
3956
1da177e4
LT
3957/* ********************************************************* *
3958 * Functions defined below concern the OS part of the driver *
3959 * ********************************************************* */
3960
20346722 3961/**
1da177e4
LT
3962 * s2io_open - open entry point of the driver
3963 * @dev : pointer to the device structure.
3964 * Description:
3965 * This function is the open entry point of the driver. It mainly calls a
3966 * function to allocate Rx buffers and inserts them into the buffer
20346722 3967 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3968 * Return value:
3969 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3970 * file on failure.
3971 */
3972
ac1f60db 3973static int s2io_open(struct net_device *dev)
1da177e4 3974{
4cf1653a 3975 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 3976 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
3977 int err = 0;
3978
20346722
K
3979 /*
3980 * Make sure you have link off by default every time
1da177e4
LT
3981 * Nic is initialized
3982 */
3983 netif_carrier_off(dev);
0b1f7ebe 3984 sp->last_link_state = 0;
1da177e4
LT
3985
3986 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3987 err = s2io_card_up(sp);
3988 if (err) {
1da177e4
LT
3989 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3990 dev->name);
e6a8fee2 3991 goto hw_init_failed;
1da177e4
LT
3992 }
3993
2fd37688 3994 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
1da177e4 3995 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3996 s2io_card_down(sp);
20346722 3997 err = -ENODEV;
e6a8fee2 3998 goto hw_init_failed;
1da177e4 3999 }
3a3d5756 4000 s2io_start_all_tx_queue(sp);
1da177e4 4001 return 0;
20346722 4002
20346722 4003hw_init_failed:
eaae7f72 4004 if (sp->config.intr_type == MSI_X) {
491976b2 4005 if (sp->entries) {
cc6e7c44 4006 kfree(sp->entries);
ffb5df6c
JP
4007 swstats->mem_freed += sp->num_entries *
4008 sizeof(struct msix_entry);
491976b2
SH
4009 }
4010 if (sp->s2io_entries) {
cc6e7c44 4011 kfree(sp->s2io_entries);
ffb5df6c
JP
4012 swstats->mem_freed += sp->num_entries *
4013 sizeof(struct s2io_msix_entry);
491976b2 4014 }
cc6e7c44 4015 }
20346722 4016 return err;
1da177e4
LT
4017}
4018
/**
 * s2io_close -close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point,thus it's usually referred to
 * as the close function.Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
        struct s2io_nic *sp = netdev_priv(dev);
        struct config_param *config = &sp->config;
        u64 tmp64;
        int offset;

        /* Return if the device is already closed *
         * Can happen when s2io_card_up failed in change_mtu *
         */
        if (!is_s2io_card_up(sp))
                return 0;

        s2io_stop_all_tx_queue(sp);
        /* delete all populated mac entries */
        for (offset = 1; offset < config->max_mc_addr; offset++) {
                tmp64 = do_s2io_read_unicast_mc(sp, offset);
                /* S2IO_DISABLE_MAC_ENTRY marks an unused slot. */
                if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
                        do_s2io_delete_unicast_mc(sp, tmp64);
        }

        s2io_card_down(sp);

        return 0;
}
4057
/**
 * s2io_xmit - Tx entry point of te driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt,just the trans_start variable will
 * not be upadted.
 * Flow: pick a TX fifo (steered by TCP/UDP ports or skb->priority), take
 * that fifo's lock, build the TxD descriptor chain (LSO/UFO/checksum/VLAN
 * flags, DMA-mapped head + frags), then kick the hardware by writing the
 * descriptor-list address and control word to the fifo registers.
 * Return value:
 * 0 on success & 1 on failure.
 */

static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = netdev_priv(dev);
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags = 0;
        u16 vlan_tag = 0;
        struct fifo_info *fifo = NULL;
        int do_spin_lock = 1;
        int offload_type;
        int enable_per_list_interrupt = 0;
        struct config_param *config = &sp->config;
        struct mac_info *mac_control = &sp->mac_control;
        struct stat_block *stats = mac_control->stats_info;
        struct swStat *swstats = &stats->sw_stat;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (!is_s2io_card_up(sp)) {
                /* Device is resetting/down: silently drop the packet. */
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        queue = 0;
        if (vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                /* Hash unfragmented TCP/UDP flows onto fifos by port sum. */
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
                        struct tcphdr *th;
                        ip = ip_hdr(skb);

                        if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
                                th = (struct tcphdr *)(((unsigned char *)ip) +
                                                       ip->ihl*4);

                                if (ip->protocol == IPPROTO_TCP) {
                                        queue_len = sp->total_tcp_fifos;
                                        queue = (ntohs(th->source) +
                                                 ntohs(th->dest)) &
                                                sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                } else if (ip->protocol == IPPROTO_UDP) {
                                        queue_len = sp->total_udp_fifos;
                                        queue = (ntohs(th->source) +
                                                 ntohs(th->dest)) &
                                                sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                        queue += sp->udp_fifo_idx;
                                        if (skb->len > 1024)
                                                enable_per_list_interrupt = 1;
                                        /* UDP fifos use trylock below. */
                                        do_spin_lock = 0;
                                }
                        }
                }
        } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
                /* get fifo number based on skb->priority value */
                queue = config->fifo_mapping
                        [skb->priority & (MAX_TX_FIFOS - 1)];
        fifo = &mac_control->fifos[queue];

        if (do_spin_lock)
                spin_lock_irqsave(&fifo->tx_lock, flags);
        else {
                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
                        return NETDEV_TX_LOCKED;
        }

        /* If this queue is already stopped, tell the stack to retry. */
        if (sp->config.multiq) {
                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
                if (netif_queue_stopped(dev)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }

        put_off = (u16)fifo->tx_curr_put_info.offset;
        get_off = (u16)fifo->tx_curr_get_info.offset;
        txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;

        queue_len = fifo->tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
            ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                s2io_stop_tx_queue(sp, fifo->fifo_no);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return NETDEV_TX_OK;
        }

        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
                                    TXD_TX_CKO_TCP_EN |
                                    TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
        if (enable_per_list_interrupt)
                if (put_off & (queue_len >> 5))
                        txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
        if (vlan_tag) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb_headlen(skb);
        if (offload_type == SKB_GSO_UDP) {
                /* UFO: TxD0 carries an 8-byte in-band header (the ipv6
                 * fragment id); the payload descriptors start at TxD1. */
                int ufo_size;

                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                /* both variants do cpu_to_be64(be32_to_cpu(...)) */
                fifo->ufo_in_band_v[put_off] =
                        (__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
                fifo->ufo_in_band_v[put_off] =
                        (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                                      fifo->ufo_in_band_v,
                                                      sizeof(u64),
                                                      PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb. */
        txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
                                              frg_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                goto pci_map_failed;

        /* Host_Control holds the skb for completion/free in the TX irq. */
        txdp->Host_Control = (unsigned long)skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
                                                         frag->page_offset,
                                                         frag->size,
                                                         PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Kick the hardware: descriptor list address, then control word. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = fifo->list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        mmiowb();

        /* Advance the put pointer (with wrap-around). */
        put_off++;
        if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        fifo->tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                swstats->fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                s2io_stop_tx_queue(sp, fifo->fifo_no);
        }
        swstats->mem_allocated += skb->truesize;
        spin_unlock_irqrestore(&fifo->tx_lock, flags);

        if (sp->config.intr_type == MSI_X)
                tx_intr_handler(fifo);

        return NETDEV_TX_OK;

pci_map_failed:
        /* DMA mapping failed: drop the packet and stop the queue. */
        swstats->pci_map_fail_cnt++;
        s2io_stop_tx_queue(sp, fifo->fifo_no);
        swstats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        return NETDEV_TX_OK;
}
4299
25fff88e
K
4300static void
4301s2io_alarm_handle(unsigned long data)
4302{
1ee6dd77 4303 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4304 struct net_device *dev = sp->dev;
25fff88e 4305
8116f3cf 4306 s2io_handle_errors(dev);
25fff88e
K
4307 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4308}
4309
7d12e780 4310static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4311{
1ee6dd77
RB
4312 struct ring_info *ring = (struct ring_info *)dev_id;
4313 struct s2io_nic *sp = ring->nic;
f61e0a35 4314 struct XENA_dev_config __iomem *bar0 = sp->bar0;
cc6e7c44 4315
f61e0a35 4316 if (unlikely(!is_s2io_card_up(sp)))
92b84437 4317 return IRQ_HANDLED;
92b84437 4318
f61e0a35 4319 if (sp->config.napi) {
1a79d1c3
AV
4320 u8 __iomem *addr = NULL;
4321 u8 val8 = 0;
f61e0a35 4322
1a79d1c3 4323 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
4324 addr += (7 - ring->ring_no);
4325 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4326 writeb(val8, addr);
4327 val8 = readb(addr);
288379f0 4328 napi_schedule(&ring->napi);
f61e0a35
SH
4329 } else {
4330 rx_intr_handler(ring, 0);
8d8bb39b 4331 s2io_chk_rx_buffers(sp, ring);
f61e0a35 4332 }
7d3d0439 4333
cc6e7c44
RA
4334 return IRQ_HANDLED;
4335}
4336
7d12e780 4337static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4338{
ac731ab6
SH
4339 int i;
4340 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4341 struct s2io_nic *sp = fifos->nic;
4342 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4343 struct config_param *config = &sp->config;
4344 u64 reason;
cc6e7c44 4345
ac731ab6
SH
4346 if (unlikely(!is_s2io_card_up(sp)))
4347 return IRQ_NONE;
4348
4349 reason = readq(&bar0->general_int_status);
4350 if (unlikely(reason == S2IO_MINUS_ONE))
4351 /* Nothing much can be done. Get out */
92b84437 4352 return IRQ_HANDLED;
92b84437 4353
01e16faa
SH
4354 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4355 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
ac731ab6 4356
01e16faa
SH
4357 if (reason & GEN_INTR_TXPIC)
4358 s2io_txpic_intr_handle(sp);
ac731ab6 4359
01e16faa
SH
4360 if (reason & GEN_INTR_TXTRAFFIC)
4361 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
ac731ab6 4362
01e16faa
SH
4363 for (i = 0; i < config->tx_fifo_num; i++)
4364 tx_intr_handler(&fifos[i]);
ac731ab6 4365
01e16faa
SH
4366 writeq(sp->general_int_mask, &bar0->general_int_mask);
4367 readl(&bar0->general_int_status);
4368 return IRQ_HANDLED;
4369 }
4370 /* The interrupt was not raised by us */
4371 return IRQ_NONE;
cc6e7c44 4372}
ac731ab6 4373
1ee6dd77 4374static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4375{
1ee6dd77 4376 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d
K
4377 u64 val64;
4378
4379 val64 = readq(&bar0->pic_int_status);
4380 if (val64 & PIC_INT_GPIO) {
4381 val64 = readq(&bar0->gpio_int_reg);
4382 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4383 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4384 /*
4385 * This is unstable state so clear both up/down
4386 * interrupt and adapter to re-evaluate the link state.
4387 */
d44570e4 4388 val64 |= GPIO_INT_REG_LINK_DOWN;
a371a07d
K
4389 val64 |= GPIO_INT_REG_LINK_UP;
4390 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4391 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4392 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4393 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4394 writeq(val64, &bar0->gpio_int_mask);
d44570e4 4395 } else if (val64 & GPIO_INT_REG_LINK_UP) {
c92ca04b 4396 val64 = readq(&bar0->adapter_status);
d44570e4 4397 /* Enable Adapter */
19a60522
SS
4398 val64 = readq(&bar0->adapter_control);
4399 val64 |= ADAPTER_CNTL_EN;
4400 writeq(val64, &bar0->adapter_control);
4401 val64 |= ADAPTER_LED_ON;
4402 writeq(val64, &bar0->adapter_control);
4403 if (!sp->device_enabled_once)
4404 sp->device_enabled_once = 1;
c92ca04b 4405
19a60522
SS
4406 s2io_link(sp, LINK_UP);
4407 /*
4408 * unmask link down interrupt and mask link-up
4409 * intr
4410 */
4411 val64 = readq(&bar0->gpio_int_mask);
4412 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4413 val64 |= GPIO_INT_MASK_LINK_UP;
4414 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4415
d44570e4 4416 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
c92ca04b 4417 val64 = readq(&bar0->adapter_status);
19a60522
SS
4418 s2io_link(sp, LINK_DOWN);
4419 /* Link is down so unmaks link up interrupt */
4420 val64 = readq(&bar0->gpio_int_mask);
4421 val64 &= ~GPIO_INT_MASK_LINK_UP;
4422 val64 |= GPIO_INT_MASK_LINK_DOWN;
4423 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4424
4425 /* turn off LED */
4426 val64 = readq(&bar0->adapter_control);
d44570e4 4427 val64 = val64 & (~ADAPTER_LED_ON);
ac1f90d6 4428 writeq(val64, &bar0->adapter_control);
a371a07d
K
4429 }
4430 }
c92ca04b 4431 val64 = readq(&bar0->gpio_int_mask);
a371a07d
K
4432}
4433
8116f3cf
SS
4434/**
4435 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4436 * @value: alarm bits
4437 * @addr: address value
4438 * @cnt: counter variable
4439 * Description: Check for alarm and increment the counter
4440 * Return Value:
4441 * 1 - if alarm bit set
4442 * 0 - if alarm bit is not set
4443 */
d44570e4
JP
4444static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4445 unsigned long long *cnt)
8116f3cf
SS
4446{
4447 u64 val64;
4448 val64 = readq(addr);
d44570e4 4449 if (val64 & value) {
8116f3cf
SS
4450 writeq(val64, addr);
4451 (*cnt)++;
4452 return 1;
4453 }
4454 return 0;
4455
4456}
4457
/**
 * s2io_handle_errors - Xframe error indication handler
 * @dev_id: opaque pointer; actually the struct net_device of this NIC
 * Description: Handle alarms such as loss of link, single or
 * double ECC errors, critical and serious errors.
 * Each alarm source register is polled via do_s2io_chk_alarm_bit(),
 * which clears the alarm and bumps the matching software counter.
 * Fatal alarms jump to the "reset" label, which stops all TX queues
 * and schedules the reset worker; non-fatal (single-bit/ECC-SG style)
 * alarms are only counted.
 * Return Value:
 * NONE
 */
static void s2io_handle_errors(void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0, val64 = 0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down ... */
	if (!is_s2io_card_up(sp))
		return;

	/* ... or has dropped off the PCI bus */
	if (pci_channel_offline(sp->pdev))
		return;

	/* Ring-full counters are re-accumulated from hardware below */
	memset(&sw_stat->ring_full_cnt, 0,
	       sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if (stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write-back clears the latched error bits */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				  &sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		/* each register packs four 16-bit per-ring counters */
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/* check for pfc_err: fatal bits reset, single-bit ECC only counted */
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
					  PFC_PCIX_ERR,
					  &bar0->pfc_err_reg,
					  &sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
				      &bar0->pfc_err_reg,
				      &sw_stat->pfc_err_cnt);
	}

	/* check for tda_err */
	if (val64 & TXDMA_TDA_INT) {
		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
					  TDA_SM0_ERR_ALARM |
					  TDA_SM1_ERR_ALARM,
					  &bar0->tda_err_reg,
					  &sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				      &bar0->tda_err_reg,
				      &sw_stat->tda_err_cnt);
	}
	/* check for pcc_err */
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
					  PCC_N_SERR | PCC_6_COF_OV_ERR |
					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
					  PCC_TXB_ECC_DB_ERR,
					  &bar0->pcc_err_reg,
					  &sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				      &bar0->pcc_err_reg,
				      &sw_stat->pcc_err_cnt);
	}

	/* check for tti_err */
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
					  &bar0->tti_err_reg,
					  &sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				      &bar0->tti_err_reg,
				      &sw_stat->tti_err_cnt);
	}

	/* check for lso_err */
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
					  &bar0->lso_err_reg,
					  &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				      &bar0->lso_err_reg,
				      &sw_stat->lso_err_cnt);
	}

	/* check for tpa_err */
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
					  &bar0->tpa_err_reg,
					  &sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
				      &bar0->tpa_err_reg,
				      &sw_stat->tpa_err_cnt);
	}

	/* check for sm_err */
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
					  &bar0->sm_err_reg,
					  &sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
					  &bar0->mac_tmac_err_reg,
					  &sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				      TMAC_DESC_ECC_SG_ERR |
				      TMAC_DESC_ECC_DB_ERR,
				      &bar0->mac_tmac_err_reg,
				      &sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
					  &bar0->xgxs_txgxs_err_reg,
					  &sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				      &bar0->xgxs_txgxs_err_reg,
				      &sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
					  RC_FTC_ECC_DB_ERR |
					  RC_PRCn_SM_ERR_ALARM |
					  RC_FTC_SM_ERR_ALARM,
					  &bar0->rc_err_reg,
					  &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
				      RC_FTC_ECC_SG_ERR |
				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				      &sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
					  PRC_PCI_AB_WR_Rn |
					  PRC_PCI_AB_F_WR_Rn,
					  &bar0->prc_pcix_err_reg,
					  &sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
				      PRC_PCI_DP_WR_Rn |
				      PRC_PCI_DP_F_WR_Rn,
				      &bar0->prc_pcix_err_reg,
				      &sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
					  &bar0->rpa_err_reg,
					  &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				      &bar0->rpa_err_reg,
				      &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
					  RDA_FRM_ECC_DB_N_AERR |
					  RDA_SM1_ERR_ALARM |
					  RDA_SM0_ERR_ALARM |
					  RDA_RXD_ECC_DB_SERR,
					  &bar0->rda_err_reg,
					  &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
				      RDA_FRM_ECC_SG_ERR |
				      RDA_MISC_ERR |
				      RDA_PCIX_ERR,
				      &bar0->rda_err_reg,
				      &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
					  &bar0->rti_err_reg,
					  &sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				      &bar0->rti_err_reg,
				      &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
					  &bar0->mac_rmac_err_reg,
					  &sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
				      RMAC_SINGLE_ECC_ERR |
				      RMAC_DOUBLE_ECC_ERR,
				      &bar0->mac_rmac_err_reg,
				      &sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
					  &bar0->xgxs_rxgxs_err_reg,
					  &sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if (val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
					  &bar0->mc_err_reg,
					  &sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal alarm: stop TX and hand off to the reset worker */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
}
4754
1da177e4
LT
/**
 * s2io_isr - ISR handler of the device .
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones usually means the device is gone (surprise removal) */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask everything while we service this interrupt */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* RX is deferred to the NAPI poll loop */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask; the read flushes the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4863
7ba013ac
K
4864/**
4865 * s2io_updt_stats -
4866 */
1ee6dd77 4867static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4868{
1ee6dd77 4869 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4870 u64 val64;
4871 int cnt = 0;
4872
92b84437 4873 if (is_s2io_card_up(sp)) {
7ba013ac
K
4874 /* Apprx 30us on a 133 MHz bus */
4875 val64 = SET_UPDT_CLICKS(10) |
4876 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4877 writeq(val64, &bar0->stat_cfg);
4878 do {
4879 udelay(100);
4880 val64 = readq(&bar0->stat_cfg);
b7b5a128 4881 if (!(val64 & s2BIT(0)))
7ba013ac
K
4882 break;
4883 cnt++;
4884 if (cnt == 5)
4885 break; /* Updt failed */
d44570e4 4886 } while (1);
8a4bdbaa 4887 }
7ba013ac
K
4888}
4889
1da177e4 4890/**
20346722 4891 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4892 * @dev : pointer to the device structure.
4893 * Description:
20346722 4894 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4895 * structure and returns a pointer to the same.
4896 * Return value:
4897 * pointer to the updated net_device_stats structure.
4898 */
ac1f60db 4899static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4900{
4cf1653a 4901 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
4902 struct mac_info *mac_control = &sp->mac_control;
4903 struct stat_block *stats = mac_control->stats_info;
4a490432 4904 u64 delta;
1da177e4 4905
7ba013ac
K
4906 /* Configure Stats for immediate updt */
4907 s2io_updt_stats(sp);
4908
4a490432
JM
4909 /* A device reset will cause the on-adapter statistics to be zero'ed.
4910 * This can be done while running by changing the MTU. To prevent the
4911 * system from having the stats zero'ed, the driver keeps a copy of the
4912 * last update to the system (which is also zero'ed on reset). This
4913 * enables the driver to accurately know the delta between the last
4914 * update and the current update.
4915 */
4916 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4917 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4918 sp->stats.rx_packets += delta;
4919 dev->stats.rx_packets += delta;
4920
4921 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4922 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4923 sp->stats.tx_packets += delta;
4924 dev->stats.tx_packets += delta;
4925
4926 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4927 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4928 sp->stats.rx_bytes += delta;
4929 dev->stats.rx_bytes += delta;
4930
4931 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4932 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4933 sp->stats.tx_bytes += delta;
4934 dev->stats.tx_bytes += delta;
4935
4936 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4937 sp->stats.rx_errors += delta;
4938 dev->stats.rx_errors += delta;
4939
4940 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4941 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4942 sp->stats.tx_errors += delta;
4943 dev->stats.tx_errors += delta;
4944
4945 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4946 sp->stats.rx_dropped += delta;
4947 dev->stats.rx_dropped += delta;
4948
4949 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4950 sp->stats.tx_dropped += delta;
4951 dev->stats.tx_dropped += delta;
4952
4953 /* The adapter MAC interprets pause frames as multicast packets, but
4954 * does not pass them up. This erroneously increases the multicast
4955 * packet count and needs to be deducted when the multicast frame count
4956 * is queried.
4957 */
4958 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4959 le32_to_cpu(stats->rmac_vld_mcst_frms);
4960 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4961 delta -= sp->stats.multicast;
4962 sp->stats.multicast += delta;
4963 dev->stats.multicast += delta;
1da177e4 4964
4a490432
JM
4965 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4966 le32_to_cpu(stats->rmac_usized_frms)) +
4967 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4968 sp->stats.rx_length_errors += delta;
4969 dev->stats.rx_length_errors += delta;
13d866a9 4970
4a490432
JM
4971 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4972 sp->stats.rx_crc_errors += delta;
4973 dev->stats.rx_crc_errors += delta;
0425b46a 4974
d44570e4 4975 return &dev->stats;
1da177e4
LT
4976}
4977
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the deivce flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 * All filter programming goes through the RMAC address CAM: data is
 * staged in rmac_addr_data0/1_mem and committed with a strobe command
 * in rmac_addr_cmd_mem, then polled for completion.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		/* NOTE(review): this branch fires while IFF_ALLMULTI is
		 * still set (toggling all-multi on repeated calls) —
		 * looks suspicious; verify intent against the Xframe
		 * programming model.
		 */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is written 32 bits at a time, each half
		 * preceded by the cfg key (presumably a write-enable
		 * key — verify against the Xframe spec).
		 */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			/* promiscuous captures need the VLAN tag intact */
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_addr = 0;
			/* pack the 6 address bytes big-endian into a u64 */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5153
faa4f796
SH
5154/* read from CAM unicast & multicast addresses and store it in
5155 * def_mac_addr structure
5156 */
dac499f9 5157static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5158{
5159 int offset;
5160 u64 mac_addr = 0x0;
5161 struct config_param *config = &sp->config;
5162
5163 /* store unicast & multicast mac addresses */
5164 for (offset = 0; offset < config->max_mc_addr; offset++) {
5165 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5166 /* if read fails disable the entry */
5167 if (mac_addr == FAILURE)
5168 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5169 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5170 }
5171}
5172
5173/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5174static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5175{
5176 int offset;
5177 struct config_param *config = &sp->config;
5178 /* restore unicast mac address */
5179 for (offset = 0; offset < config->max_mac_addr; offset++)
5180 do_s2io_prog_unicast(sp->dev,
d44570e4 5181 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5182
5183 /* restore multicast mac address */
5184 for (offset = config->mc_start_offset;
d44570e4 5185 offset < config->max_mc_addr; offset++)
faa4f796
SH
5186 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5187}
5188
5189/* add a multicast MAC address to CAM */
5190static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5191{
5192 int i;
5193 u64 mac_addr = 0;
5194 struct config_param *config = &sp->config;
5195
5196 for (i = 0; i < ETH_ALEN; i++) {
5197 mac_addr <<= 8;
5198 mac_addr |= addr[i];
5199 }
5200 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5201 return SUCCESS;
5202
5203 /* check if the multicast mac already preset in CAM */
5204 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5205 u64 tmp64;
5206 tmp64 = do_s2io_read_unicast_mc(sp, i);
5207 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5208 break;
5209
5210 if (tmp64 == mac_addr)
5211 return SUCCESS;
5212 }
5213 if (i == config->max_mc_addr) {
5214 DBG_PRINT(ERR_DBG,
d44570e4 5215 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5216 return FAILURE;
5217 }
5218 /* Update the internal structure with this new mac address */
5219 do_s2io_copy_mac_addr(sp, i, mac_addr);
5220
d44570e4 5221 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5222}
5223
/* add MAC address to CAM
 * @sp: device private structure
 * @addr: MAC address packed big-endian into the low 48 bits of a u64
 * @off: CAM entry offset to program
 * Stages the address in rmac_addr_data0_mem, then issues a write-enable
 * strobe command and polls for completion.
 * Returns SUCCESS or FAILURE (command timed out).
 */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
faa4f796
SH
5246/* deletes a specified unicast/multicast mac entry from CAM */
5247static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5248{
5249 int offset;
5250 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5251 struct config_param *config = &sp->config;
5252
5253 for (offset = 1;
d44570e4 5254 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5255 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5256 if (tmp64 == addr) {
5257 /* disable the entry by writing 0xffffffffffffULL */
5258 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5259 return FAILURE;
5260 /* store the new mac list from CAM */
5261 do_s2io_store_unicast_mc(sp);
5262 return SUCCESS;
5263 }
5264 }
5265 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5266 (unsigned long long)addr);
faa4f796
SH
5267 return FAILURE;
5268}
5269
/* read mac entries from CAM
 * @sp: device private structure
 * @offset: CAM entry offset to read
 * Issues a read-strobe command for the entry and polls for completion,
 * then returns the 48-bit MAC from the data register (shifted down from
 * the top of the 64-bit word).
 * Returns the packed MAC address, or FAILURE if the command timed out.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* MAC occupies the upper 48 bits of the data register */
	return tmp64 >> 16;
}
2fd37688
SS
5292
5293/**
5294 * s2io_set_mac_addr driver entry point
5295 */
faa4f796 5296
2fd37688
SS
5297static int s2io_set_mac_addr(struct net_device *dev, void *p)
5298{
5299 struct sockaddr *addr = p;
5300
5301 if (!is_valid_ether_addr(addr->sa_data))
5302 return -EINVAL;
5303
5304 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5305
5306 /* store the MAC address in CAM */
d44570e4 5307 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5308}
/**
 * do_s2io_prog_unicast - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with new Mac Address
 * The permanent address lives at CAM offset 0; additional unicast
 * filters are placed in the first free slot of [1, max_mac_addr).
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return do_s2io_add_mac(sp, mac_addr, i);
}
5365
5366/**
20346722 5367 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5368 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5369 * @info: pointer to the structure with parameters given by ethtool to set
5370 * link information.
5371 * Description:
20346722 5372 * The function sets different link parameters provided by the user onto
1da177e4
LT
5373 * the NIC.
5374 * Return value:
5375 * 0 on success.
d44570e4 5376 */
1da177e4
LT
5377
5378static int s2io_ethtool_sset(struct net_device *dev,
5379 struct ethtool_cmd *info)
5380{
4cf1653a 5381 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5382 if ((info->autoneg == AUTONEG_ENABLE) ||
d44570e4
JP
5383 (info->speed != SPEED_10000) ||
5384 (info->duplex != DUPLEX_FULL))
1da177e4
LT
5385 return -EINVAL;
5386 else {
5387 s2io_close(sp->dev);
5388 s2io_open(sp->dev);
5389 }
5390
5391 return 0;
5392}
5393
5394/**
20346722 5395 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5396 * @sp : private member of the device structure, pointer to the
5397 * s2io_nic structure.
5398 * @info : pointer to the structure with parameters given by ethtool
5399 * to return link information.
5400 * Description:
5401 * Returns link specific information like speed, duplex etc.. to ethtool.
5402 * Return value :
5403 * return 0 on success.
5404 */
5405
5406static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5407{
4cf1653a 5408 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5409 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5410 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5411 info->port = PORT_FIBRE;
1a7eb72b
SS
5412
5413 /* info->transceiver */
5414 info->transceiver = XCVR_EXTERNAL;
1da177e4
LT
5415
5416 if (netif_carrier_ok(sp->dev)) {
5417 info->speed = 10000;
5418 info->duplex = DUPLEX_FULL;
5419 } else {
5420 info->speed = -1;
5421 info->duplex = -1;
5422 }
5423
5424 info->autoneg = AUTONEG_DISABLE;
5425 return 0;
5426}
5427
5428/**
20346722
K
5429 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5430 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5431 * s2io_nic structure.
5432 * @info : pointer to the structure with parameters given by ethtool to
5433 * return driver information.
5434 * Description:
5435 * Returns driver specefic information like name, version etc.. to ethtool.
5436 * Return value:
5437 * void
5438 */
5439
5440static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5441 struct ethtool_drvinfo *info)
5442{
4cf1653a 5443 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5444
dbc2309d
JL
5445 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5446 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5447 strncpy(info->fw_version, "", sizeof(info->fw_version));
5448 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5449 info->regdump_len = XENA_REG_SPACE;
5450 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5451}
5452
5453/**
5454 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5455 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5456 * s2io_nic structure.
20346722 5457 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5458 * dumping the registers.
5459 * @reg_space: The input argumnet into which all the registers are dumped.
5460 * Description:
5461 * Dumps the entire register space of xFrame NIC into the user given
5462 * buffer area.
5463 * Return value :
5464 * void .
d44570e4 5465 */
1da177e4
LT
5466
5467static void s2io_ethtool_gregs(struct net_device *dev,
5468 struct ethtool_regs *regs, void *space)
5469{
5470 int i;
5471 u64 reg;
d44570e4 5472 u8 *reg_space = (u8 *)space;
4cf1653a 5473 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5474
5475 regs->len = XENA_REG_SPACE;
5476 regs->version = sp->pdev->subsystem_device;
5477
5478 for (i = 0; i < regs->len; i += 8) {
5479 reg = readq(sp->bar0 + i);
5480 memcpy((reg_space + i), &reg, 8);
5481 }
5482}
5483
034e3450 5484/*
5485 * s2io_set_led - control NIC led
d44570e4 5486 */
034e3450 5487static void s2io_set_led(struct s2io_nic *sp, bool on)
1da177e4 5488{
1ee6dd77 5489 struct XENA_dev_config __iomem *bar0 = sp->bar0;
034e3450 5490 u16 subid = sp->pdev->subsystem_device;
5491 u64 val64;
1da177e4 5492
541ae68f 5493 if ((sp->device_type == XFRAME_II_DEVICE) ||
d44570e4 5494 ((subid & 0xFF) >= 0x07)) {
1da177e4 5495 val64 = readq(&bar0->gpio_control);
034e3450 5496 if (on)
5497 val64 |= GPIO_CTRL_GPIO_0;
5498 else
5499 val64 &= ~GPIO_CTRL_GPIO_0;
5500
1da177e4
LT
5501 writeq(val64, &bar0->gpio_control);
5502 } else {
5503 val64 = readq(&bar0->adapter_control);
034e3450 5504 if (on)
5505 val64 |= ADAPTER_LED_ON;
5506 else
5507 val64 &= ~ADAPTER_LED_ON;
5508
1da177e4
LT
5509 writeq(val64, &bar0->adapter_control);
5510 }
5511
1da177e4
LT
5512}
5513
5514/**
034e3450 5515 * s2io_ethtool_set_led - To physically identify the nic on the system.
5516 * @dev : network device
5517 * @state: led setting
5518 *
1da177e4 5519 * Description: Used to physically identify the NIC on the system.
20346722 5520 * The Link LED will blink for a time specified by the user for
1da177e4 5521 * identification.
20346722 5522 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4 5523 * identification is possible only if it's link is up.
1da177e4
LT
5524 */
5525
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Old Xframe I revisions can only blink the LED via the adapter
	 * control register, which requires the adapter to be enabled. */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* NOTE(review): despite the "adapt_ctrl" name, this saves
		 * the gpio_control register so ETHTOOL_ID_INACTIVE can
		 * restore it below — confirm field naming is historical. */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1; /* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved GPIO state on cards whose link LED
		 * cannot be trusted after blinking. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5561
0cec35eb 5562static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5563 struct ethtool_ringparam *ering)
0cec35eb 5564{
4cf1653a 5565 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5566 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5567
1853e2e1 5568 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5569 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5570 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5571 } else {
0cec35eb 5572 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5573 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5574 }
0cec35eb 5575
1853e2e1 5576 ering->rx_mini_max_pending = 0;
0cec35eb 5577 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5578
1853e2e1 5579 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5580 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5581 ering->rx_pending = rx_desc_count;
0cec35eb 5582 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5583 ering->rx_mini_pending = 0;
5584
5585 for (i = 0; i < sp->config.tx_fifo_num; i++)
5586 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5587 ering->tx_pending = tx_desc_count;
5588 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5589}
5590
1da177e4
LT
5591/**
5592 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722
K
5593 * @sp : private member of the device structure, which is a pointer to the
5594 * s2io_nic structure.
1da177e4
LT
5595 * @ep : pointer to the structure with pause parameters given by ethtool.
5596 * Description:
5597 * Returns the Pause frame generation and reception capability of the NIC.
5598 * Return value:
5599 * void
5600 */
5601static void s2io_ethtool_getpause_data(struct net_device *dev,
5602 struct ethtool_pauseparam *ep)
5603{
5604 u64 val64;
4cf1653a 5605 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5606 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5607
5608 val64 = readq(&bar0->rmac_pause_cfg);
5609 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5610 ep->tx_pause = true;
1da177e4 5611 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5612 ep->rx_pause = true;
5613 ep->autoneg = false;
1da177e4
LT
5614}
5615
5616/**
5617 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5618 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5619 * s2io_nic structure.
5620 * @ep : pointer to the structure with pause parameters given by ethtool.
5621 * Description:
5622 * It can be used to set or reset Pause frame generation or reception
5623 * support of the NIC.
5624 * Return value:
5625 * int, returns 0 on Success
5626 */
5627
5628static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5629 struct ethtool_pauseparam *ep)
1da177e4
LT
5630{
5631 u64 val64;
4cf1653a 5632 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5633 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5634
5635 val64 = readq(&bar0->rmac_pause_cfg);
5636 if (ep->tx_pause)
5637 val64 |= RMAC_PAUSE_GEN_ENABLE;
5638 else
5639 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5640 if (ep->rx_pause)
5641 val64 |= RMAC_PAUSE_RX_ENABLE;
5642 else
5643 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5644 writeq(val64, &bar0->rmac_pause_cfg);
5645 return 0;
5646}
5647
5648/**
5649 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5650 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5651 * s2io_nic structure.
5652 * @off : offset at which the data must be written
5653 * @data : Its an output parameter where the data read at the given
20346722 5654 * offset is stored.
1da177e4 5655 * Description:
20346722 5656 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5657 * read data.
5658 * NOTE: Will allow to read only part of the EEPROM visible through the
5659 * I2C bus.
5660 * Return value:
5661 * -1 on failure and 0 on success.
5662 */
5663
#define S2IO_DEV_ID 5
/* Reads 4 bytes of EEPROM at @off into @data. Xframe I goes through the
 * I2C controller; Xframe II uses the SPI controller. Both paths poll for
 * completion up to 5 times with 50 ms sleeps. Returns 0 on success,
 * non-zero on timeout or NACK. */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Kick off a 4-byte I2C read at the requested offset. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Program the SPI read command, then raise the request bit
		 * in a second write as the controller expects. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* 3 data bytes */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5716
5717/**
5718 * write_eeprom - actually writes the relevant part of the data value.
5719 * @sp : private member of the device structure, which is a pointer to the
5720 * s2io_nic structure.
5721 * @off : offset at which the data must be written
5722 * @data : The data that is to be written
20346722 5723 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5724 * the Eeprom. (max of 3)
5725 * Description:
5726 * Actually writes the relevant part of the data value into the Eeprom
5727 * through the I2C bus.
5728 * Return value:
5729 * 0 on success, -1 on failure.
5730 */
5731
/* Writes @cnt bytes of @data to EEPROM offset @off. Xframe I uses the
 * I2C controller, Xframe II the SPI controller; both poll completion up
 * to 5 times with 50 ms sleeps. Returns 0 on success, non-zero on
 * timeout or NACK. */
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* Write completed; NACK means failure. */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* NOTE(review): cnt==8 is encoded as byte-count 0 for the
		 * SPI controller — presumably hardware convention; verify
		 * against the Xframe II SPI register spec. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* Reads the PCI VPD area (256 bytes, via the VPD capability at
 * @vpd_addr) to extract the adapter's product name and serial number
 * into nic->product_name / nic->serial_num. Falls back to default
 * strings when the read fails. */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* VPD capability offset in config space */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Fetch the VPD image one dword at a time: write the address,
	 * clear the flag byte, then poll until the device sets 0x80. */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		/* Scan for the "SN" keyword; the following byte is the
		 * field length, then the serial-number bytes. */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* Byte 1 of the VPD image holds the product-name length,
	 * the name itself starting at byte 3. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5853
1da177e4
LT
5854/**
5855 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5856 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5857 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5858 * containing all relevant information.
5859 * @data_buf : user defined value to be written into Eeprom.
5860 * Description: Reads the values stored in the Eeprom at given offset
5861 * for a given length. Stores these values int the input argument data
5862 * buffer 'data_buf' and returns these to the caller (ethtool.)
5863 * Return value:
5864 * int 0 on success
5865 */
5866
5867static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5868 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5869{
ad4ebed0 5870 u32 i, valid;
5871 u64 data;
4cf1653a 5872 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5873
5874 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5875
5876 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5877 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5878
5879 for (i = 0; i < eeprom->len; i += 4) {
5880 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5881 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5882 return -EFAULT;
5883 }
5884 valid = INV(data);
5885 memcpy((data_buf + i), &valid, 4);
5886 }
5887 return 0;
5888}
5889
5890/**
5891 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
20346722 5894 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5895 * containing all relevant information.
5896 * @data_buf ; user defined value to be written into Eeprom.
5897 * Description:
5898 * Tries to write the user provided value in the Eeprom, at the offset
5899 * given by the user.
5900 * Return value:
5901 * 0 on success, -EFAULT on failure.
5902 */
5903
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The magic must encode this exact vendor/device pair, guarding
	 * against writes intended for a different adapter. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per iteration. Non-zero bytes are shifted into
	 * the top byte of a 32-bit word before the write — presumably the
	 * byte lane the controller expects for single-byte writes; verify
	 * against write_eeprom()'s data encoding. */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5940
5941/**
20346722
K
5942 * s2io_register_test - reads and writes into all clock domains.
5943 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5944 * s2io_nic structure.
5945 * @data : variable that returns the result of each of the test conducted b
5946 * by the driver.
5947 * Description:
5948 * Read and write into all clock domains. The NIC has 3 clock domains,
5949 * see that registers in all the three regions are accessible.
5950 * Return value:
5951 * 0 on success.
5952 */
5953
/* Sanity-checks register access across the NIC's clock domains by
 * reading registers with known reset values and doing a write/readback
 * on xmsi_data. Stores 1 in *data on any mismatch; returns that value. */
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read-only pattern register: fixed value after reset. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* Default RX queue configuration differs between chip types. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write/readback with complementary bit patterns. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
6007
6008/**
20346722 6009 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
6010 * @sp : private member of the device structure, which is a pointer to the
6011 * s2io_nic structure.
6012 * @data:variable that returns the result of each of the test conducted by
6013 * the driver.
6014 * Description:
20346722 6015 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
6016 * register.
6017 * Return value:
6018 * 0 on success.
6019 */
6020
/* EEPROM self-test: verifies that writes to protected offsets fail
 * (Xframe I only — SPI on Xframe II allows writing everywhere) and that
 * writes to the scratch offsets 0x4F0/0x7F0 round-trip. The original
 * contents of the scratch offsets are saved and restored. Stores the
 * pass/fail flag in *data and returns it (0 = pass). */
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;	/* write to offset 0 must fail */

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: all of these protected offsets must
		 * reject writes on Xframe I. */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6108
6109/**
6110 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6111 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6112 * s2io_nic structure.
20346722 6113 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6114 * the driver.
6115 * Description:
6116 * This invokes the MemBist test of the card. We give around
6117 * 2 secs time for the Test to complete. If it's still not complete
20346722 6118 * within this peiod, we consider that the test failed.
1da177e4
LT
6119 * Return value:
6120 * 0 on success and -1 on failure.
6121 */
6122
/* Starts the card's PCI built-in self test and polls up to ~2 s for
 * completion. On completion *data holds the BIST result code; returns 0
 * on completion, -1 on timeout. */
static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
{
	u8 bist = 0;
	int cnt = 0, ret = -1;

	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	bist |= PCI_BIST_START;
	/* NOTE(review): PCI_BIST is a one-byte register but is written
	 * here with a word access — confirm this is intentional for this
	 * device's config space. */
	pci_write_config_word(sp->pdev, PCI_BIST, bist);

	/* Hardware clears PCI_BIST_START when the test finishes. */
	while (cnt < 20) {
		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
		if (!(bist & PCI_BIST_START)) {
			*data = (bist & PCI_BIST_CODE_MASK);
			ret = 0;
			break;
		}
		msleep(100);
		cnt++;
	}

	return ret;
}
6145
6146/**
20346722
K
6147 * s2io-link_test - verifies the link state of the nic
6148 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6149 * s2io_nic structure.
6150 * @data: variable that returns the result of each of the test conducted by
6151 * the driver.
6152 * Description:
20346722 6153 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6154 * argument 'data' appropriately.
6155 * Return value:
6156 * 0 on success.
6157 */
6158
d44570e4 6159static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6160{
1ee6dd77 6161 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6162 u64 val64;
6163
6164 val64 = readq(&bar0->adapter_status);
d44570e4 6165 if (!(LINK_IS_UP(val64)))
1da177e4 6166 *data = 1;
c92ca04b
AR
6167 else
6168 *data = 0;
1da177e4 6169
b41477f3 6170 return *data;
1da177e4
LT
6171}
6172
6173/**
20346722
K
6174 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6175 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6176 * s2io_nic structure.
20346722 6177 * @data - variable that returns the result of each of the test
1da177e4
LT
6178 * conducted by the driver.
6179 * Description:
20346722 6180 * This is one of the offline test that tests the read and write
1da177e4
LT
6181 * access to the RldRam chip on the NIC.
6182 * Return value:
6183 * 0 on success.
6184 */
6185
/* Offline RLDRAM access test: with ECC disabled and the memory
 * controller in test mode, writes three 64-bit test patterns (and, on
 * the second iteration, their inverted forms), triggers a test write
 * then a test read, and checks the PASS bit. Stores 1 in *data on
 * failure and returns it; always takes the controller back out of test
 * mode before returning. */
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so pattern mismatches are not corrected away. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the patterns, then their bit-flipped variants. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start a test WRITE and poll for completion. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write never completed: bail out */

		/* Start a test READ of the same address and poll. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read never completed: bail out */

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6268
6269/**
6270 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6271 * @sp : private member of the device structure, which is a pointer to the
6272 * s2io_nic structure.
6273 * @ethtest : pointer to a ethtool command specific structure that will be
6274 * returned to the user.
20346722 6275 * @data : variable that returns the result of each of the test
1da177e4
LT
6276 * conducted by the driver.
6277 * Description:
6278 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6279 * the health of the card.
6280 * Return value:
6281 * void
6282 */
6283
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The interface must be down for offline tests; it is
		 * restored to its original state afterwards. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests so each starts from a clean state. */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* Link test (slot 2) is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only results are reported as 0 when online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6337
/*
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 * @dev: device whose statistics are requested.
 * @estats: ethtool request descriptor (unused by this implementation).
 * @tmp_stats: output array of u64 counters.
 *
 * The write order below is the wire format seen by `ethtool -S`; it must
 * match the corresponding string tables (xena keys, then the Hercules-only
 * enhanced keys, then the driver software keys).  Hardware counters are
 * little-endian in the stat block; 32-bit counters with a separate
 * overflow word are combined into one 64-bit value.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Refresh the DMA'd hardware statistics block before reading it. */
	s2io_updt_stats(sp);

	/* TMAC (transmit MAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);

	/* RMAC (receive MAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-queue receive frame and queue-full counters (q0..q7). */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

	/* PCI-X transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver-maintained software statistics. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* Per-cause TX error counters. */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* Per-cause RX and internal-block error counters. */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6614
ac1f60db 6615static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4 6616{
d44570e4 6617 return XENA_REG_SPACE;
1da177e4
LT
6618}
6619
6620
d44570e4 6621static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
1da177e4 6622{
4cf1653a 6623 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 6624
d44570e4 6625 return sp->rx_csum;
1da177e4 6626}
ac1f60db
AB
6627
6628static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6629{
4cf1653a 6630 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
6631
6632 if (data)
6633 sp->rx_csum = 1;
6634 else
6635 sp->rx_csum = 0;
6636
6637 return 0;
6638}
ac1f60db
AB
6639
/* Report the size of the EEPROM exposed via ethtool get/set eeprom. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6644
b9f2c044 6645static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6646{
4cf1653a 6647 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6648
6649 switch (sset) {
6650 case ETH_SS_TEST:
6651 return S2IO_TEST_LEN;
6652 case ETH_SS_STATS:
d44570e4 6653 switch (sp->device_type) {
b9f2c044
JG
6654 case XFRAME_I_DEVICE:
6655 return XFRAME_I_STAT_LEN;
6656 case XFRAME_II_DEVICE:
6657 return XFRAME_II_STAT_LEN;
6658 default:
6659 return 0;
6660 }
6661 default:
6662 return -EOPNOTSUPP;
6663 }
1da177e4 6664}
ac1f60db
AB
6665
6666static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6667 u32 stringset, u8 *data)
1da177e4 6668{
fa1f0cb3 6669 int stat_size = 0;
4cf1653a 6670 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6671
1da177e4
LT
6672 switch (stringset) {
6673 case ETH_SS_TEST:
6674 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6675 break;
6676 case ETH_SS_STATS:
fa1f0cb3 6677 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6678 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6679 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6680 memcpy(data + stat_size,
d44570e4
JP
6681 &ethtool_enhanced_stats_keys,
6682 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6683 stat_size += sizeof(ethtool_enhanced_stats_keys);
6684 }
6685
6686 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6687 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6688 }
6689}
1da177e4 6690
ac1f60db 6691static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6692{
6693 if (data)
6694 dev->features |= NETIF_F_IP_CSUM;
6695 else
6696 dev->features &= ~NETIF_F_IP_CSUM;
6697
6698 return 0;
6699}
6700
75c30b13
AR
6701static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6702{
6703 return (dev->features & NETIF_F_TSO) != 0;
6704}
958de193 6705
75c30b13
AR
6706static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6707{
6708 if (data)
6709 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6710 else
6711 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6712
6713 return 0;
6714}
1da177e4 6715
/*
 * s2io_ethtool_set_flags - ethtool .set_flags handler (LRO only).
 * @dev: device being configured.
 * @data: requested ETH_FLAG_* bits; only ETH_FLAG_LRO is accepted.
 *
 * Toggles NETIF_F_LRO and, if the setting actually changed while the
 * interface is running, restarts the card so the new mode takes effect.
 * Returns 0 on success, -EINVAL for unsupported flags, or the error from
 * s2io_card_up().
 */
static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int rc = 0;
	int changed = 0;

	/* Reject any flag bits other than LRO. */
	if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
		return -EINVAL;

	if (data & ETH_FLAG_LRO) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			changed = 1;
		}
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		changed = 1;
	}

	/* A mode change on a running interface requires a full restart. */
	if (changed && netif_running(dev)) {
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		rc = s2io_card_up(sp);
		if (rc)
			s2io_reset(sp);	/* bring-up failed: leave card reset */
		else
			s2io_start_all_tx_queue(sp);
	}

	return rc;
}
6747
7282d491 6748static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
6749 .get_settings = s2io_ethtool_gset,
6750 .set_settings = s2io_ethtool_sset,
6751 .get_drvinfo = s2io_ethtool_gdrvinfo,
6752 .get_regs_len = s2io_ethtool_get_regs_len,
6753 .get_regs = s2io_ethtool_gregs,
6754 .get_link = ethtool_op_get_link,
6755 .get_eeprom_len = s2io_get_eeprom_len,
6756 .get_eeprom = s2io_ethtool_geeprom,
6757 .set_eeprom = s2io_ethtool_seeprom,
0cec35eb 6758 .get_ringparam = s2io_ethtool_gringparam,
1da177e4
LT
6759 .get_pauseparam = s2io_ethtool_getpause_data,
6760 .set_pauseparam = s2io_ethtool_setpause_data,
6761 .get_rx_csum = s2io_ethtool_get_rx_csum,
6762 .set_rx_csum = s2io_ethtool_set_rx_csum,
1da177e4 6763 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
958de193
JM
6764 .set_flags = s2io_ethtool_set_flags,
6765 .get_flags = ethtool_op_get_flags,
1da177e4 6766 .set_sg = ethtool_op_set_sg,
75c30b13
AR
6767 .get_tso = s2io_ethtool_op_get_tso,
6768 .set_tso = s2io_ethtool_op_set_tso,
fed5eccd 6769 .set_ufo = ethtool_op_set_ufo,
1da177e4
LT
6770 .self_test = s2io_ethtool_test,
6771 .get_strings = s2io_ethtool_get_strings,
034e3450 6772 .set_phys_id = s2io_ethtool_set_led,
b9f2c044
JG
6773 .get_ethtool_stats = s2io_get_ethtool_stats,
6774 .get_sset_count = s2io_get_sset_count,
1da177e4
LT
6775};
6776
6777/**
20346722 6778 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
6779 * @dev : Device pointer.
6780 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6781 * a proprietary structure used to pass information to the driver.
6782 * @cmd : This is used to distinguish between the different commands that
6783 * can be passed to the IOCTL functions.
6784 * Description:
20346722
K
6785 * Currently there are no special functionality supported in IOCTL, hence
6786 * function always return EOPNOTSUPPORTED
1da177e4
LT
6787 */
6788
ac1f60db 6789static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
6790{
6791 return -EOPNOTSUPP;
6792}
6793
6794/**
6795 * s2io_change_mtu - entry point to change MTU size for the device.
6796 * @dev : device pointer.
6797 * @new_mtu : the new MTU size for the device.
6798 * Description: A driver entry point to change MTU size for the device.
6799 * Before changing the MTU the device must be stopped.
6800 * Return value:
6801 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6802 * file on failure.
6803 */
6804
ac1f60db 6805static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6806{
4cf1653a 6807 struct s2io_nic *sp = netdev_priv(dev);
9f74ffde 6808 int ret = 0;
1da177e4
LT
6809
6810 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
d44570e4 6811 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
1da177e4
LT
6812 return -EPERM;
6813 }
6814
1da177e4 6815 dev->mtu = new_mtu;
d8892c6e 6816 if (netif_running(dev)) {
3a3d5756 6817 s2io_stop_all_tx_queue(sp);
e6a8fee2 6818 s2io_card_down(sp);
9f74ffde
SH
6819 ret = s2io_card_up(sp);
6820 if (ret) {
d8892c6e 6821 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
b39d66a8 6822 __func__);
9f74ffde 6823 return ret;
d8892c6e 6824 }
3a3d5756 6825 s2io_wake_all_tx_queue(sp);
d8892c6e 6826 } else { /* Device is down */
1ee6dd77 6827 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6828 u64 val64 = new_mtu;
6829
6830 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6831 }
1da177e4 6832
9f74ffde 6833 return ret;
1da177e4
LT
6834}
6835
1da177e4
LT
6836/**
6837 * s2io_set_link - Set the LInk status
6838 * @data: long pointer to device private structue
6839 * Description: Sets the link status for the adapter
6840 */
6841
c4028958 6842static void s2io_set_link(struct work_struct *work)
1da177e4 6843{
d44570e4
JP
6844 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6845 set_link_task);
1da177e4 6846 struct net_device *dev = nic->dev;
1ee6dd77 6847 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
6848 register u64 val64;
6849 u16 subid;
6850
22747d6b
FR
6851 rtnl_lock();
6852
6853 if (!netif_running(dev))
6854 goto out_unlock;
6855
92b84437 6856 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
1da177e4 6857 /* The card is being reset, no point doing anything */
22747d6b 6858 goto out_unlock;
1da177e4
LT
6859 }
6860
6861 subid = nic->pdev->subsystem_device;
a371a07d
K
6862 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6863 /*
6864 * Allow a small delay for the NICs self initiated
6865 * cleanup to complete.
6866 */
6867 msleep(100);
6868 }
1da177e4
LT
6869
6870 val64 = readq(&bar0->adapter_status);
19a60522
SS
6871 if (LINK_IS_UP(val64)) {
6872 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6873 if (verify_xena_quiescence(nic)) {
6874 val64 = readq(&bar0->adapter_control);
6875 val64 |= ADAPTER_CNTL_EN;
1da177e4 6876 writeq(val64, &bar0->adapter_control);
19a60522 6877 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
d44570e4 6878 nic->device_type, subid)) {
19a60522
SS
6879 val64 = readq(&bar0->gpio_control);
6880 val64 |= GPIO_CTRL_GPIO_0;
6881 writeq(val64, &bar0->gpio_control);
6882 val64 = readq(&bar0->gpio_control);
6883 } else {
6884 val64 |= ADAPTER_LED_ON;
6885 writeq(val64, &bar0->adapter_control);
a371a07d 6886 }
f957bcf0 6887 nic->device_enabled_once = true;
19a60522 6888 } else {
9e39f7c5
JP
6889 DBG_PRINT(ERR_DBG,
6890 "%s: Error: device is not Quiescent\n",
6891 dev->name);
3a3d5756 6892 s2io_stop_all_tx_queue(nic);
1da177e4 6893 }
19a60522 6894 }
92c48799
SS
6895 val64 = readq(&bar0->adapter_control);
6896 val64 |= ADAPTER_LED_ON;
6897 writeq(val64, &bar0->adapter_control);
6898 s2io_link(nic, LINK_UP);
19a60522
SS
6899 } else {
6900 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6901 subid)) {
6902 val64 = readq(&bar0->gpio_control);
6903 val64 &= ~GPIO_CTRL_GPIO_0;
6904 writeq(val64, &bar0->gpio_control);
6905 val64 = readq(&bar0->gpio_control);
1da177e4 6906 }
92c48799
SS
6907 /* turn off LED */
6908 val64 = readq(&bar0->adapter_control);
d44570e4 6909 val64 = val64 & (~ADAPTER_LED_ON);
92c48799 6910 writeq(val64, &bar0->adapter_control);
19a60522 6911 s2io_link(nic, LINK_DOWN);
1da177e4 6912 }
92b84437 6913 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
22747d6b
FR
6914
6915out_unlock:
d8d70caf 6916 rtnl_unlock();
1da177e4
LT
6917}
6918
1ee6dd77 6919static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
d44570e4
JP
6920 struct buffAdd *ba,
6921 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6922 u64 *temp2, int size)
5d3213cc
AR
6923{
6924 struct net_device *dev = sp->dev;
491abf25 6925 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
5d3213cc
AR
6926
6927 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6d517a27 6928 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
5d3213cc
AR
6929 /* allocate skb */
6930 if (*skb) {
6931 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6932 /*
6933 * As Rx frame are not going to be processed,
6934 * using same mapped address for the Rxd
6935 * buffer pointer
6936 */
6d517a27 6937 rxdp1->Buffer0_ptr = *temp0;
5d3213cc
AR
6938 } else {
6939 *skb = dev_alloc_skb(size);
6940 if (!(*skb)) {
9e39f7c5
JP
6941 DBG_PRINT(INFO_DBG,
6942 "%s: Out of memory to allocate %s\n",
6943 dev->name, "1 buf mode SKBs");
ffb5df6c 6944 stats->mem_alloc_fail_cnt++;
5d3213cc
AR
6945 return -ENOMEM ;
6946 }
ffb5df6c 6947 stats->mem_allocated += (*skb)->truesize;
5d3213cc
AR
6948 /* storing the mapped addr in a temp variable
6949 * such it will be used for next rxd whose
6950 * Host Control is NULL
6951 */
6d517a27 6952 rxdp1->Buffer0_ptr = *temp0 =
d44570e4
JP
6953 pci_map_single(sp->pdev, (*skb)->data,
6954 size - NET_IP_ALIGN,
6955 PCI_DMA_FROMDEVICE);
8d8bb39b 6956 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
491abf25 6957 goto memalloc_failed;
5d3213cc
AR
6958 rxdp->Host_Control = (unsigned long) (*skb);
6959 }
6960 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6d517a27 6961 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
5d3213cc
AR
6962 /* Two buffer Mode */
6963 if (*skb) {
6d517a27
VP
6964 rxdp3->Buffer2_ptr = *temp2;
6965 rxdp3->Buffer0_ptr = *temp0;
6966 rxdp3->Buffer1_ptr = *temp1;
5d3213cc
AR
6967 } else {
6968 *skb = dev_alloc_skb(size);
2ceaac75 6969 if (!(*skb)) {
9e39f7c5
JP
6970 DBG_PRINT(INFO_DBG,
6971 "%s: Out of memory to allocate %s\n",
6972 dev->name,
6973 "2 buf mode SKBs");
ffb5df6c 6974 stats->mem_alloc_fail_cnt++;
2ceaac75
DR
6975 return -ENOMEM;
6976 }
ffb5df6c 6977 stats->mem_allocated += (*skb)->truesize;
6d517a27 6978 rxdp3->Buffer2_ptr = *temp2 =
5d3213cc
AR
6979 pci_map_single(sp->pdev, (*skb)->data,
6980 dev->mtu + 4,
6981 PCI_DMA_FROMDEVICE);
8d8bb39b 6982 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
491abf25 6983 goto memalloc_failed;
6d517a27 6984 rxdp3->Buffer0_ptr = *temp0 =
d44570e4
JP
6985 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6986 PCI_DMA_FROMDEVICE);
8d8bb39b 6987 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6988 rxdp3->Buffer0_ptr)) {
6989 pci_unmap_single(sp->pdev,
6990 (dma_addr_t)rxdp3->Buffer2_ptr,
6991 dev->mtu + 4,
6992 PCI_DMA_FROMDEVICE);
491abf25
VP
6993 goto memalloc_failed;
6994 }
5d3213cc
AR
6995 rxdp->Host_Control = (unsigned long) (*skb);
6996
6997 /* Buffer-1 will be dummy buffer not used */
6d517a27 6998 rxdp3->Buffer1_ptr = *temp1 =
5d3213cc 6999 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
d44570e4 7000 PCI_DMA_FROMDEVICE);
8d8bb39b 7001 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
7002 rxdp3->Buffer1_ptr)) {
7003 pci_unmap_single(sp->pdev,
7004 (dma_addr_t)rxdp3->Buffer0_ptr,
7005 BUF0_LEN, PCI_DMA_FROMDEVICE);
7006 pci_unmap_single(sp->pdev,
7007 (dma_addr_t)rxdp3->Buffer2_ptr,
7008 dev->mtu + 4,
7009 PCI_DMA_FROMDEVICE);
491abf25
VP
7010 goto memalloc_failed;
7011 }
5d3213cc
AR
7012 }
7013 }
7014 return 0;
d44570e4
JP
7015
7016memalloc_failed:
7017 stats->pci_map_fail_cnt++;
7018 stats->mem_freed += (*skb)->truesize;
7019 dev_kfree_skb(*skb);
7020 return -ENOMEM;
5d3213cc 7021}
491abf25 7022
1ee6dd77
RB
7023static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
7024 int size)
5d3213cc
AR
7025{
7026 struct net_device *dev = sp->dev;
7027 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 7028 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
7029 } else if (sp->rxd_mode == RXD_MODE_3B) {
7030 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7031 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 7032 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
7033 }
7034}
7035
1ee6dd77 7036static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
7037{
7038 int i, j, k, blk_cnt = 0, size;
5d3213cc 7039 struct config_param *config = &sp->config;
ffb5df6c 7040 struct mac_info *mac_control = &sp->mac_control;
5d3213cc 7041 struct net_device *dev = sp->dev;
1ee6dd77 7042 struct RxD_t *rxdp = NULL;
5d3213cc 7043 struct sk_buff *skb = NULL;
1ee6dd77 7044 struct buffAdd *ba = NULL;
5d3213cc
AR
7045 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7046
7047 /* Calculate the size based on ring mode */
7048 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7049 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7050 if (sp->rxd_mode == RXD_MODE_1)
7051 size += NET_IP_ALIGN;
7052 else if (sp->rxd_mode == RXD_MODE_3B)
7053 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
5d3213cc
AR
7054
7055 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7056 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7057 struct ring_info *ring = &mac_control->rings[i];
7058
d44570e4 7059 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
5d3213cc
AR
7060
7061 for (j = 0; j < blk_cnt; j++) {
7062 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
d44570e4
JP
7063 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
7064 if (sp->rxd_mode == RXD_MODE_3B)
13d866a9 7065 ba = &ring->ba[j][k];
d44570e4
JP
7066 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
7067 (u64 *)&temp0_64,
7068 (u64 *)&temp1_64,
7069 (u64 *)&temp2_64,
7070 size) == -ENOMEM) {
ac1f90d6
SS
7071 return 0;
7072 }
5d3213cc
AR
7073
7074 set_rxd_buffer_size(sp, rxdp, size);
7075 wmb();
7076 /* flip the Ownership bit to Hardware */
7077 rxdp->Control_1 |= RXD_OWN_XENA;
7078 }
7079 }
7080 }
7081 return 0;
7082
7083}
7084
d44570e4 7085static int s2io_add_isr(struct s2io_nic *sp)
1da177e4 7086{
e6a8fee2 7087 int ret = 0;
c92ca04b 7088 struct net_device *dev = sp->dev;
e6a8fee2 7089 int err = 0;
1da177e4 7090
eaae7f72 7091 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7092 ret = s2io_enable_msi_x(sp);
7093 if (ret) {
7094 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
eaae7f72 7095 sp->config.intr_type = INTA;
20346722 7096 }
1da177e4 7097
d44570e4
JP
7098 /*
7099 * Store the values of the MSIX table in
7100 * the struct s2io_nic structure
7101 */
e6a8fee2 7102 store_xmsi_data(sp);
c92ca04b 7103
e6a8fee2 7104 /* After proper initialization of H/W, register ISR */
eaae7f72 7105 if (sp->config.intr_type == MSI_X) {
ac731ab6
SH
7106 int i, msix_rx_cnt = 0;
7107
f61e0a35
SH
7108 for (i = 0; i < sp->num_entries; i++) {
7109 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7110 if (sp->s2io_entries[i].type ==
d44570e4 7111 MSIX_RING_TYPE) {
ac731ab6
SH
7112 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7113 dev->name, i);
7114 err = request_irq(sp->entries[i].vector,
d44570e4
JP
7115 s2io_msix_ring_handle,
7116 0,
7117 sp->desc[i],
7118 sp->s2io_entries[i].arg);
ac731ab6 7119 } else if (sp->s2io_entries[i].type ==
d44570e4 7120 MSIX_ALARM_TYPE) {
ac731ab6 7121 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
d44570e4 7122 dev->name, i);
ac731ab6 7123 err = request_irq(sp->entries[i].vector,
d44570e4
JP
7124 s2io_msix_fifo_handle,
7125 0,
7126 sp->desc[i],
7127 sp->s2io_entries[i].arg);
ac731ab6 7128
fb6a825b 7129 }
ac731ab6
SH
7130 /* if either data or addr is zero print it. */
7131 if (!(sp->msix_info[i].addr &&
d44570e4 7132 sp->msix_info[i].data)) {
ac731ab6 7133 DBG_PRINT(ERR_DBG,
d44570e4
JP
7134 "%s @Addr:0x%llx Data:0x%llx\n",
7135 sp->desc[i],
7136 (unsigned long long)
7137 sp->msix_info[i].addr,
7138 (unsigned long long)
7139 ntohl(sp->msix_info[i].data));
ac731ab6 7140 } else
fb6a825b 7141 msix_rx_cnt++;
ac731ab6
SH
7142 if (err) {
7143 remove_msix_isr(sp);
7144
7145 DBG_PRINT(ERR_DBG,
d44570e4
JP
7146 "%s:MSI-X-%d registration "
7147 "failed\n", dev->name, i);
ac731ab6
SH
7148
7149 DBG_PRINT(ERR_DBG,
d44570e4
JP
7150 "%s: Defaulting to INTA\n",
7151 dev->name);
ac731ab6
SH
7152 sp->config.intr_type = INTA;
7153 break;
fb6a825b 7154 }
ac731ab6
SH
7155 sp->s2io_entries[i].in_use =
7156 MSIX_REGISTERED_SUCCESS;
c92ca04b 7157 }
e6a8fee2 7158 }
18b2b7bd 7159 if (!err) {
6cef2b8e 7160 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
9e39f7c5
JP
7161 DBG_PRINT(INFO_DBG,
7162 "MSI-X-TX entries enabled through alarm vector\n");
18b2b7bd 7163 }
e6a8fee2 7164 }
eaae7f72 7165 if (sp->config.intr_type == INTA) {
d44570e4
JP
7166 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7167 sp->name, dev);
e6a8fee2
AR
7168 if (err) {
7169 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7170 dev->name);
7171 return -1;
7172 }
7173 }
7174 return 0;
7175}
d44570e4
JP
7176
7177static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7178{
18b2b7bd
SH
7179 if (sp->config.intr_type == MSI_X)
7180 remove_msix_isr(sp);
7181 else
7182 remove_inta_isr(sp);
e6a8fee2
AR
7183}
7184
d44570e4 7185static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
e6a8fee2
AR
7186{
7187 int cnt = 0;
1ee6dd77 7188 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2 7189 register u64 val64 = 0;
5f490c96
SH
7190 struct config_param *config;
7191 config = &sp->config;
e6a8fee2 7192
9f74ffde
SH
7193 if (!is_s2io_card_up(sp))
7194 return;
7195
e6a8fee2
AR
7196 del_timer_sync(&sp->alarm_timer);
7197 /* If s2io_set_link task is executing, wait till it completes. */
d44570e4 7198 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
e6a8fee2 7199 msleep(50);
92b84437 7200 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
e6a8fee2 7201
5f490c96 7202 /* Disable napi */
f61e0a35
SH
7203 if (sp->config.napi) {
7204 int off = 0;
7205 if (config->intr_type == MSI_X) {
7206 for (; off < sp->config.rx_ring_num; off++)
7207 napi_disable(&sp->mac_control.rings[off].napi);
d44570e4 7208 }
f61e0a35
SH
7209 else
7210 napi_disable(&sp->napi);
7211 }
5f490c96 7212
e6a8fee2 7213 /* disable Tx and Rx traffic on the NIC */
d796fdb7
LV
7214 if (do_io)
7215 stop_nic(sp);
e6a8fee2
AR
7216
7217 s2io_rem_isr(sp);
1da177e4 7218
01e16faa
SH
7219 /* stop the tx queue, indicate link down */
7220 s2io_link(sp, LINK_DOWN);
7221
1da177e4 7222 /* Check if the device is Quiescent and then Reset the NIC */
d44570e4 7223 while (do_io) {
5d3213cc
AR
7224 /* As per the HW requirement we need to replenish the
7225 * receive buffer to avoid the ring bump. Since there is
7226 * no intention of processing the Rx frame at this pointwe are
7227 * just settting the ownership bit of rxd in Each Rx
7228 * ring to HW and set the appropriate buffer size
7229 * based on the ring mode
7230 */
7231 rxd_owner_bit_reset(sp);
7232
1da177e4 7233 val64 = readq(&bar0->adapter_status);
19a60522 7234 if (verify_xena_quiescence(sp)) {
d44570e4
JP
7235 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7236 break;
1da177e4
LT
7237 }
7238
7239 msleep(50);
7240 cnt++;
7241 if (cnt == 10) {
9e39f7c5
JP
7242 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7243 "adapter status reads 0x%llx\n",
d44570e4 7244 (unsigned long long)val64);
1da177e4
LT
7245 break;
7246 }
d796fdb7
LV
7247 }
7248 if (do_io)
7249 s2io_reset(sp);
1da177e4 7250
7ba013ac 7251 /* Free all Tx buffers */
1da177e4 7252 free_tx_buffers(sp);
7ba013ac
K
7253
7254 /* Free all Rx buffers */
1da177e4
LT
7255 free_rx_buffers(sp);
7256
92b84437 7257 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
1da177e4
LT
7258}
7259
d44570e4 7260static void s2io_card_down(struct s2io_nic *sp)
d796fdb7
LV
7261{
7262 do_s2io_card_down(sp, 1);
7263}
7264
d44570e4 7265static int s2io_card_up(struct s2io_nic *sp)
1da177e4 7266{
cc6e7c44 7267 int i, ret = 0;
1da177e4 7268 struct config_param *config;
ffb5df6c 7269 struct mac_info *mac_control;
d44570e4 7270 struct net_device *dev = (struct net_device *)sp->dev;
e6a8fee2 7271 u16 interruptible;
1da177e4
LT
7272
7273 /* Initialize the H/W I/O registers */
9f74ffde
SH
7274 ret = init_nic(sp);
7275 if (ret != 0) {
1da177e4
LT
7276 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7277 dev->name);
9f74ffde
SH
7278 if (ret != -EIO)
7279 s2io_reset(sp);
7280 return ret;
1da177e4
LT
7281 }
7282
20346722
K
7283 /*
7284 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
7285 * Rx ring and initializing buffers into 30 Rx blocks
7286 */
1da177e4 7287 config = &sp->config;
ffb5df6c 7288 mac_control = &sp->mac_control;
1da177e4
LT
7289
7290 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7291 struct ring_info *ring = &mac_control->rings[i];
7292
7293 ring->mtu = dev->mtu;
f0c54ace 7294 ring->lro = !!(dev->features & NETIF_F_LRO);
13d866a9 7295 ret = fill_rx_buffers(sp, ring, 1);
0425b46a 7296 if (ret) {
1da177e4
LT
7297 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7298 dev->name);
7299 s2io_reset(sp);
7300 free_rx_buffers(sp);
7301 return -ENOMEM;
7302 }
7303 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
13d866a9 7304 ring->rx_bufs_left);
1da177e4 7305 }
5f490c96
SH
7306
7307 /* Initialise napi */
f61e0a35 7308 if (config->napi) {
f61e0a35
SH
7309 if (config->intr_type == MSI_X) {
7310 for (i = 0; i < sp->config.rx_ring_num; i++)
7311 napi_enable(&sp->mac_control.rings[i].napi);
7312 } else {
7313 napi_enable(&sp->napi);
7314 }
7315 }
5f490c96 7316
19a60522
SS
7317 /* Maintain the state prior to the open */
7318 if (sp->promisc_flg)
7319 sp->promisc_flg = 0;
7320 if (sp->m_cast_flg) {
7321 sp->m_cast_flg = 0;
d44570e4 7322 sp->all_multi_pos = 0;
19a60522 7323 }
1da177e4
LT
7324
7325 /* Setting its receive mode */
7326 s2io_set_multicast(dev);
7327
f0c54ace 7328 if (dev->features & NETIF_F_LRO) {
b41477f3 7329 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439 7330 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
d44570e4 7331 /* Check if we can use (if specified) user provided value */
7d3d0439
RA
7332 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7333 sp->lro_max_aggr_per_sess = lro_max_pkts;
7334 }
7335
1da177e4
LT
7336 /* Enable Rx Traffic and interrupts on the NIC */
7337 if (start_nic(sp)) {
7338 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 7339 s2io_reset(sp);
e6a8fee2
AR
7340 free_rx_buffers(sp);
7341 return -ENODEV;
7342 }
7343
7344 /* Add interrupt service routine */
7345 if (s2io_add_isr(sp) != 0) {
eaae7f72 7346 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7347 s2io_rem_isr(sp);
7348 s2io_reset(sp);
1da177e4
LT
7349 free_rx_buffers(sp);
7350 return -ENODEV;
7351 }
7352
25fff88e
K
7353 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7354
01e16faa
SH
7355 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7356
e6a8fee2 7357 /* Enable select interrupts */
9caab458 7358 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
01e16faa
SH
7359 if (sp->config.intr_type != INTA) {
7360 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7361 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7362 } else {
e6a8fee2 7363 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 7364 interruptible |= TX_PIC_INTR;
e6a8fee2
AR
7365 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7366 }
7367
1da177e4
LT
7368 return 0;
7369}
7370
20346722 7371/**
1da177e4
LT
7372 * s2io_restart_nic - Resets the NIC.
7373 * @data : long pointer to the device private structure
7374 * Description:
7375 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7376 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7377 * the run time of the watch dog routine which is run holding a
7378 * spin lock.
7379 */
7380
c4028958 7381static void s2io_restart_nic(struct work_struct *work)
1da177e4 7382{
1ee6dd77 7383 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7384 struct net_device *dev = sp->dev;
1da177e4 7385
22747d6b
FR
7386 rtnl_lock();
7387
7388 if (!netif_running(dev))
7389 goto out_unlock;
7390
e6a8fee2 7391 s2io_card_down(sp);
1da177e4 7392 if (s2io_card_up(sp)) {
d44570e4 7393 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
1da177e4 7394 }
3a3d5756 7395 s2io_wake_all_tx_queue(sp);
d44570e4 7396 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
22747d6b
FR
7397out_unlock:
7398 rtnl_unlock();
1da177e4
LT
7399}
7400
20346722
K
7401/**
7402 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7403 * @dev : Pointer to net device structure
7404 * Description:
7405 * This function is triggered if the Tx Queue is stopped
7406 * for a pre-defined amount of time when the Interface is still up.
7407 * If the Interface is jammed in such a situation, the hardware is
7408 * reset (by s2io_close) and restarted again (by s2io_open) to
7409 * overcome any problem that might have been caused in the hardware.
7410 * Return value:
7411 * void
7412 */
7413
7414static void s2io_tx_watchdog(struct net_device *dev)
7415{
4cf1653a 7416 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7417 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7418
7419 if (netif_carrier_ok(dev)) {
ffb5df6c 7420 swstats->watchdog_timer_cnt++;
1da177e4 7421 schedule_work(&sp->rst_timer_task);
ffb5df6c 7422 swstats->soft_reset_cnt++;
1da177e4
LT
7423 }
7424}
7425
7426/**
7427 * rx_osm_handler - To perform some OS related operations on SKB.
7428 * @sp: private member of the device structure,pointer to s2io_nic structure.
7429 * @skb : the socket buffer pointer.
7430 * @len : length of the packet
7431 * @cksum : FCS checksum of the frame.
7432 * @ring_no : the ring from which this RxD was extracted.
20346722 7433 * Description:
b41477f3 7434 * This function is called by the Rx interrupt serivce routine to perform
1da177e4
LT
7435 * some OS related operations on the SKB before passing it to the upper
7436 * layers. It mainly checks if the checksum is OK, if so adds it to the
7437 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7438 * to the upper layer. If the checksum is wrong, it increments the Rx
7439 * packet error count, frees the SKB and returns error.
7440 * Return value:
7441 * SUCCESS on success and -1 on failure.
7442 */
1ee6dd77 7443static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 7444{
1ee6dd77 7445 struct s2io_nic *sp = ring_data->nic;
d44570e4 7446 struct net_device *dev = (struct net_device *)ring_data->dev;
20346722 7447 struct sk_buff *skb = (struct sk_buff *)
d44570e4 7448 ((unsigned long)rxdp->Host_Control);
20346722 7449 int ring_no = ring_data->ring_no;
1da177e4 7450 u16 l3_csum, l4_csum;
863c11a9 7451 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
2e6a684b 7452 struct lro *uninitialized_var(lro);
f9046eb3 7453 u8 err_mask;
ffb5df6c 7454 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
da6971d8 7455
20346722 7456 skb->dev = dev;
c92ca04b 7457
863c11a9 7458 if (err) {
bd1034f0 7459 /* Check for parity error */
d44570e4 7460 if (err & 0x1)
ffb5df6c 7461 swstats->parity_err_cnt++;
d44570e4 7462
f9046eb3 7463 err_mask = err >> 48;
d44570e4
JP
7464 switch (err_mask) {
7465 case 1:
ffb5df6c 7466 swstats->rx_parity_err_cnt++;
491976b2
SH
7467 break;
7468
d44570e4 7469 case 2:
ffb5df6c 7470 swstats->rx_abort_cnt++;
491976b2
SH
7471 break;
7472
d44570e4 7473 case 3:
ffb5df6c 7474 swstats->rx_parity_abort_cnt++;
491976b2
SH
7475 break;
7476
d44570e4 7477 case 4:
ffb5df6c 7478 swstats->rx_rda_fail_cnt++;
491976b2
SH
7479 break;
7480
d44570e4 7481 case 5:
ffb5df6c 7482 swstats->rx_unkn_prot_cnt++;
491976b2
SH
7483 break;
7484
d44570e4 7485 case 6:
ffb5df6c 7486 swstats->rx_fcs_err_cnt++;
491976b2 7487 break;
bd1034f0 7488
d44570e4 7489 case 7:
ffb5df6c 7490 swstats->rx_buf_size_err_cnt++;
491976b2
SH
7491 break;
7492
d44570e4 7493 case 8:
ffb5df6c 7494 swstats->rx_rxd_corrupt_cnt++;
491976b2
SH
7495 break;
7496
d44570e4 7497 case 15:
ffb5df6c 7498 swstats->rx_unkn_err_cnt++;
491976b2
SH
7499 break;
7500 }
863c11a9 7501 /*
d44570e4
JP
7502 * Drop the packet if bad transfer code. Exception being
7503 * 0x5, which could be due to unsupported IPv6 extension header.
7504 * In this case, we let stack handle the packet.
7505 * Note that in this case, since checksum will be incorrect,
7506 * stack will validate the same.
7507 */
f9046eb3
OH
7508 if (err_mask != 0x5) {
7509 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
d44570e4 7510 dev->name, err_mask);
dc56e634 7511 dev->stats.rx_crc_errors++;
ffb5df6c 7512 swstats->mem_freed
491976b2 7513 += skb->truesize;
863c11a9 7514 dev_kfree_skb(skb);
0425b46a 7515 ring_data->rx_bufs_left -= 1;
863c11a9
AR
7516 rxdp->Host_Control = 0;
7517 return 0;
7518 }
20346722 7519 }
1da177e4 7520
20346722 7521 rxdp->Host_Control = 0;
da6971d8
AR
7522 if (sp->rxd_mode == RXD_MODE_1) {
7523 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 7524
da6971d8 7525 skb_put(skb, len);
6d517a27 7526 } else if (sp->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
7527 int get_block = ring_data->rx_curr_get_info.block_index;
7528 int get_off = ring_data->rx_curr_get_info.offset;
7529 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7530 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7531 unsigned char *buff = skb_push(skb, buf0_len);
7532
1ee6dd77 7533 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8 7534 memcpy(buff, ba->ba_0, buf0_len);
6d517a27 7535 skb_put(skb, buf2_len);
da6971d8 7536 }
20346722 7537
d44570e4
JP
7538 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7539 ((!ring_data->lro) ||
7540 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
20346722
K
7541 (sp->rx_csum)) {
7542 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
7543 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7544 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 7545 /*
1da177e4
LT
7546 * NIC verifies if the Checksum of the received
7547 * frame is Ok or not and accordingly returns
7548 * a flag in the RxD.
7549 */
7550 skb->ip_summed = CHECKSUM_UNNECESSARY;
0425b46a 7551 if (ring_data->lro) {
06f0c139 7552 u32 tcp_len = 0;
7d3d0439
RA
7553 u8 *tcp;
7554 int ret = 0;
7555
0425b46a 7556 ret = s2io_club_tcp_session(ring_data,
d44570e4
JP
7557 skb->data, &tcp,
7558 &tcp_len, &lro,
7559 rxdp, sp);
7d3d0439 7560 switch (ret) {
d44570e4
JP
7561 case 3: /* Begin anew */
7562 lro->parent = skb;
7563 goto aggregate;
7564 case 1: /* Aggregate */
7565 lro_append_pkt(sp, lro, skb, tcp_len);
7566 goto aggregate;
7567 case 4: /* Flush session */
7568 lro_append_pkt(sp, lro, skb, tcp_len);
7569 queue_rx_frame(lro->parent,
7570 lro->vlan_tag);
7571 clear_lro_session(lro);
ffb5df6c 7572 swstats->flush_max_pkts++;
d44570e4
JP
7573 goto aggregate;
7574 case 2: /* Flush both */
7575 lro->parent->data_len = lro->frags_len;
ffb5df6c 7576 swstats->sending_both++;
d44570e4
JP
7577 queue_rx_frame(lro->parent,
7578 lro->vlan_tag);
7579 clear_lro_session(lro);
7580 goto send_up;
7581 case 0: /* sessions exceeded */
7582 case -1: /* non-TCP or not L2 aggregatable */
7583 case 5: /*
7584 * First pkt in session not
7585 * L3/L4 aggregatable
7586 */
7587 break;
7588 default:
7589 DBG_PRINT(ERR_DBG,
7590 "%s: Samadhana!!\n",
7591 __func__);
7592 BUG();
7d3d0439
RA
7593 }
7594 }
1da177e4 7595 } else {
20346722
K
7596 /*
7597 * Packet with erroneous checksum, let the
1da177e4
LT
7598 * upper layers deal with it.
7599 */
bc8acf2c 7600 skb_checksum_none_assert(skb);
1da177e4 7601 }
cdb5bf02 7602 } else
bc8acf2c 7603 skb_checksum_none_assert(skb);
cdb5bf02 7604
ffb5df6c 7605 swstats->mem_freed += skb->truesize;
7d3d0439 7606send_up:
0c8dfc83 7607 skb_record_rx_queue(skb, ring_no);
cdb5bf02 7608 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7d3d0439 7609aggregate:
0425b46a 7610 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
1da177e4
LT
7611 return SUCCESS;
7612}
7613
7614/**
7615 * s2io_link - stops/starts the Tx queue.
7616 * @sp : private member of the device structure, which is a pointer to the
7617 * s2io_nic structure.
7618 * @link : inidicates whether link is UP/DOWN.
7619 * Description:
7620 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7621 * status of the NIC is is down or up. This is called by the Alarm
7622 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7623 * Return value:
7624 * void.
7625 */
7626
d44570e4 7627static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7628{
d44570e4 7629 struct net_device *dev = (struct net_device *)sp->dev;
ffb5df6c 7630 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7631
7632 if (link != sp->last_link_state) {
b7c5678f 7633 init_tti(sp, link);
1da177e4
LT
7634 if (link == LINK_DOWN) {
7635 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7636 s2io_stop_all_tx_queue(sp);
1da177e4 7637 netif_carrier_off(dev);
ffb5df6c
JP
7638 if (swstats->link_up_cnt)
7639 swstats->link_up_time =
7640 jiffies - sp->start_time;
7641 swstats->link_down_cnt++;
1da177e4
LT
7642 } else {
7643 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7644 if (swstats->link_down_cnt)
7645 swstats->link_down_time =
d44570e4 7646 jiffies - sp->start_time;
ffb5df6c 7647 swstats->link_up_cnt++;
1da177e4 7648 netif_carrier_on(dev);
3a3d5756 7649 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7650 }
7651 }
7652 sp->last_link_state = link;
491976b2 7653 sp->start_time = jiffies;
1da177e4
LT
7654}
7655
20346722
K
7656/**
7657 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7658 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7659 * s2io_nic structure.
7660 * Description:
7661 * This function initializes a few of the PCI and PCI-X configuration registers
7662 * with recommended values.
7663 * Return value:
7664 * void
7665 */
7666
d44570e4 7667static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7668{
20346722 7669 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7670
7671 /* Enable Data Parity Error Recovery in PCI-X command register. */
7672 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7673 &(pcix_cmd));
1da177e4 7674 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7675 (pcix_cmd | 1));
1da177e4 7676 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7677 &(pcix_cmd));
1da177e4
LT
7678
7679 /* Set the PErr Response bit in PCI command register. */
7680 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7681 pci_write_config_word(sp->pdev, PCI_COMMAND,
7682 (pci_cmd | PCI_COMMAND_PARITY));
7683 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7684}
7685
3a3d5756 7686static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
d44570e4 7687 u8 *dev_multiq)
9dc737a7 7688{
1853e2e1
JM
7689 int i;
7690
d44570e4 7691 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
9e39f7c5 7692 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
d44570e4 7693 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7694
7695 if (tx_fifo_num < 1)
7696 tx_fifo_num = 1;
7697 else
7698 tx_fifo_num = MAX_TX_FIFOS;
7699
9e39f7c5 7700 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
9dc737a7 7701 }
2fda096d 7702
6cfc482b 7703 if (multiq)
3a3d5756 7704 *dev_multiq = multiq;
6cfc482b
SH
7705
7706 if (tx_steering_type && (1 == tx_fifo_num)) {
7707 if (tx_steering_type != TX_DEFAULT_STEERING)
7708 DBG_PRINT(ERR_DBG,
9e39f7c5 7709 "Tx steering is not supported with "
d44570e4 7710 "one fifo. Disabling Tx steering.\n");
6cfc482b
SH
7711 tx_steering_type = NO_STEERING;
7712 }
7713
7714 if ((tx_steering_type < NO_STEERING) ||
d44570e4
JP
7715 (tx_steering_type > TX_DEFAULT_STEERING)) {
7716 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7717 "Requested transmit steering not supported\n");
7718 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
6cfc482b 7719 tx_steering_type = NO_STEERING;
3a3d5756
SH
7720 }
7721
0425b46a 7722 if (rx_ring_num > MAX_RX_RINGS) {
d44570e4 7723 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7724 "Requested number of rx rings not supported\n");
7725 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
d44570e4 7726 MAX_RX_RINGS);
0425b46a 7727 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7728 }
0425b46a 7729
eccb8628 7730 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9e39f7c5 7731 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
9dc737a7
AR
7732 "Defaulting to INTA\n");
7733 *dev_intr_type = INTA;
7734 }
596c5c97 7735
9dc737a7 7736 if ((*dev_intr_type == MSI_X) &&
d44570e4
JP
7737 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7738 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
9e39f7c5 7739 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
d44570e4 7740 "Defaulting to INTA\n");
9dc737a7
AR
7741 *dev_intr_type = INTA;
7742 }
fb6a825b 7743
6d517a27 7744 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9e39f7c5
JP
7745 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7746 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
6d517a27 7747 rx_ring_mode = 1;
9dc737a7 7748 }
1853e2e1
JM
7749
7750 for (i = 0; i < MAX_RX_RINGS; i++)
7751 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7752 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7753 "supported\nDefaulting to %d\n",
7754 MAX_RX_BLOCKS_PER_RING);
7755 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7756 }
7757
9dc737a7
AR
7758 return SUCCESS;
7759}
7760
9fc93a41
SS
7761/**
7762 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7763 * or Traffic class respectively.
b7c5678f 7764 * @nic: device private variable
9fc93a41
SS
7765 * Description: The function configures the receive steering to
7766 * desired receive ring.
7767 * Return Value: SUCCESS on success and
7768 * '-1' on failure (endian settings incorrect).
7769 */
7770static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7771{
7772 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7773 register u64 val64 = 0;
7774
7775 if (ds_codepoint > 63)
7776 return FAILURE;
7777
7778 val64 = RTS_DS_MEM_DATA(ring);
7779 writeq(val64, &bar0->rts_ds_mem_data);
7780
7781 val64 = RTS_DS_MEM_CTRL_WE |
7782 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7783 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7784
7785 writeq(val64, &bar0->rts_ds_mem_ctrl);
7786
7787 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7788 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7789 S2IO_BIT_RESET);
9fc93a41
SS
7790}
7791
04025095
SH
/* Net device callback table wiring the kernel network stack to the
 * driver's open/close, transmit, multicast, MAC/MTU, VLAN and watchdog
 * handlers defined elsewhere in this file.
 */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = s2io_set_multicast,
	.ndo_do_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_vlan_rx_register = s2io_vlan_rx_register,
	.ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Polling entry point for netconsole and similar users. */
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7809
1da177e4 7810/**
20346722 7811 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7812 * @pdev : structure containing the PCI related information of the device.
7813 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7814 * Description:
7815 * The function initializes an adapter identified by the pci_dec structure.
20346722
K
7816 * All OS related initialization including memory and device structure and
7817 * initlaization of the device private variable is done. Also the swapper
7818 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7819 * registers of the device.
7820 * Return value:
7821 * returns 0 on success and negative on failure.
7822 */
7823
7824static int __devinit
7825s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7826{
1ee6dd77 7827 struct s2io_nic *sp;
1da177e4 7828 struct net_device *dev;
1da177e4 7829 int i, j, ret;
f957bcf0 7830 int dma_flag = false;
1da177e4
LT
7831 u32 mac_up, mac_down;
7832 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7833 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7834 u16 subid;
1da177e4 7835 struct config_param *config;
ffb5df6c 7836 struct mac_info *mac_control;
541ae68f 7837 int mode;
cc6e7c44 7838 u8 dev_intr_type = intr_type;
3a3d5756 7839 u8 dev_multiq = 0;
1da177e4 7840
3a3d5756
SH
7841 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7842 if (ret)
9dc737a7 7843 return ret;
1da177e4 7844
d44570e4
JP
7845 ret = pci_enable_device(pdev);
7846 if (ret) {
1da177e4 7847 DBG_PRINT(ERR_DBG,
9e39f7c5 7848 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7849 return ret;
7850 }
7851
6a35528a 7852 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7853 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7854 dma_flag = true;
d44570e4 7855 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7856 DBG_PRINT(ERR_DBG,
d44570e4
JP
7857 "Unable to obtain 64bit DMA "
7858 "for consistent allocations\n");
1da177e4
LT
7859 pci_disable_device(pdev);
7860 return -ENOMEM;
7861 }
284901a9 7862 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7863 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7864 } else {
7865 pci_disable_device(pdev);
7866 return -ENOMEM;
7867 }
d44570e4
JP
7868 ret = pci_request_regions(pdev, s2io_driver_name);
7869 if (ret) {
9e39f7c5 7870 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7871 __func__, ret);
eccb8628
VP
7872 pci_disable_device(pdev);
7873 return -ENODEV;
1da177e4 7874 }
3a3d5756 7875 if (dev_multiq)
6cfc482b 7876 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7877 else
b19fa1fa 7878 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
7879 if (dev == NULL) {
7880 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7881 pci_disable_device(pdev);
7882 pci_release_regions(pdev);
7883 return -ENODEV;
7884 }
7885
7886 pci_set_master(pdev);
7887 pci_set_drvdata(pdev, dev);
1da177e4
LT
7888 SET_NETDEV_DEV(dev, &pdev->dev);
7889
7890 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7891 sp = netdev_priv(dev);
1da177e4
LT
7892 sp->dev = dev;
7893 sp->pdev = pdev;
1da177e4 7894 sp->high_dma_flag = dma_flag;
f957bcf0 7895 sp->device_enabled_once = false;
da6971d8
AR
7896 if (rx_ring_mode == 1)
7897 sp->rxd_mode = RXD_MODE_1;
7898 if (rx_ring_mode == 2)
7899 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7900
eaae7f72 7901 sp->config.intr_type = dev_intr_type;
1da177e4 7902
541ae68f 7903 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7904 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f
K
7905 sp->device_type = XFRAME_II_DEVICE;
7906 else
7907 sp->device_type = XFRAME_I_DEVICE;
7908
6aa20a22 7909
1da177e4
LT
7910 /* Initialize some PCI/PCI-X fields of the NIC. */
7911 s2io_init_pci(sp);
7912
20346722 7913 /*
1da177e4 7914 * Setting the device configuration parameters.
20346722
K
7915 * Most of these parameters can be specified by the user during
7916 * module insertion as they are module loadable parameters. If
7917 * these parameters are not not specified during load time, they
1da177e4
LT
7918 * are initialized with default values.
7919 */
1da177e4 7920 config = &sp->config;
ffb5df6c 7921 mac_control = &sp->mac_control;
1da177e4 7922
596c5c97 7923 config->napi = napi;
6cfc482b 7924 config->tx_steering_type = tx_steering_type;
596c5c97 7925
1da177e4 7926 /* Tx side parameters. */
6cfc482b
SH
7927 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7928 config->tx_fifo_num = MAX_TX_FIFOS;
7929 else
7930 config->tx_fifo_num = tx_fifo_num;
7931
7932 /* Initialize the fifos used for tx steering */
7933 if (config->tx_fifo_num < 5) {
d44570e4
JP
7934 if (config->tx_fifo_num == 1)
7935 sp->total_tcp_fifos = 1;
7936 else
7937 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7938 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7939 sp->total_udp_fifos = 1;
7940 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7941 } else {
7942 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7943 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7944 sp->udp_fifo_idx = sp->total_tcp_fifos;
7945 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7946 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7947 }
7948
3a3d5756 7949 config->multiq = dev_multiq;
6cfc482b 7950 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7951 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7952
7953 tx_cfg->fifo_len = tx_fifo_len[i];
7954 tx_cfg->fifo_priority = i;
1da177e4
LT
7955 }
7956
20346722
K
7957 /* mapping the QoS priority to the configured fifos */
7958 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7959 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7960
6cfc482b
SH
7961 /* map the hashing selector table to the configured fifos */
7962 for (i = 0; i < config->tx_fifo_num; i++)
7963 sp->fifo_selector[i] = fifo_selector[i];
7964
7965
1da177e4
LT
7966 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7967 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7968 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7969
7970 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7971 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7972 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7973 break;
7974 }
7975 }
fed5eccd
AR
7976 /* + 2 because one Txd for skb->data and one Txd for UFO */
7977 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7978
7979 /* Rx side parameters. */
1da177e4 7980 config->rx_ring_num = rx_ring_num;
0425b46a 7981 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7982 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7983 struct ring_info *ring = &mac_control->rings[i];
7984
7985 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7986 rx_cfg->ring_priority = i;
7987 ring->rx_bufs_left = 0;
7988 ring->rxd_mode = sp->rxd_mode;
7989 ring->rxd_count = rxd_count[sp->rxd_mode];
7990 ring->pdev = sp->pdev;
7991 ring->dev = sp->dev;
1da177e4
LT
7992 }
7993
7994 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7995 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7996
7997 rx_cfg->ring_org = RING_ORG_BUFF1;
7998 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7999 }
8000
8001 /* Setting Mac Control parameters */
8002 mac_control->rmac_pause_time = rmac_pause_time;
8003 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
8004 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
8005
8006
1da177e4
LT
8007 /* initialize the shared memory used by the NIC and the host */
8008 if (init_shared_mem(sp)) {
d44570e4 8009 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
8010 ret = -ENOMEM;
8011 goto mem_alloc_failed;
8012 }
8013
275f165f 8014 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 8015 if (!sp->bar0) {
19a60522 8016 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
8017 dev->name);
8018 ret = -ENOMEM;
8019 goto bar0_remap_failed;
8020 }
8021
275f165f 8022 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 8023 if (!sp->bar1) {
19a60522 8024 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
8025 dev->name);
8026 ret = -ENOMEM;
8027 goto bar1_remap_failed;
8028 }
8029
8030 dev->irq = pdev->irq;
d44570e4 8031 dev->base_addr = (unsigned long)sp->bar0;
1da177e4
LT
8032
8033 /* Initializing the BAR1 address as the start of the FIFO pointer. */
8034 for (j = 0; j < MAX_TX_FIFOS; j++) {
d44570e4
JP
8035 mac_control->tx_FIFO_start[j] =
8036 (struct TxFIFO_element __iomem *)
8037 (sp->bar1 + (j * 0x00020000));
1da177e4
LT
8038 }
8039
8040 /* Driver entry points */
04025095 8041 dev->netdev_ops = &s2io_netdev_ops;
1da177e4 8042 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 8043 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
f0c54ace 8044 dev->features |= NETIF_F_LRO;
1da177e4 8045 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
f957bcf0 8046 if (sp->high_dma_flag == true)
1da177e4 8047 dev->features |= NETIF_F_HIGHDMA;
1da177e4 8048 dev->features |= NETIF_F_TSO;
f83ef8c0 8049 dev->features |= NETIF_F_TSO6;
db874e65 8050 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
8051 dev->features |= NETIF_F_UFO;
8052 dev->features |= NETIF_F_HW_CSUM;
8053 }
1da177e4 8054 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
8055 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8056 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 8057
e960fc5c 8058 pci_save_state(sp->pdev);
1da177e4
LT
8059
8060 /* Setting swapper control on the NIC, for proper reset operation */
8061 if (s2io_set_swapper(sp)) {
9e39f7c5 8062 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
8063 dev->name);
8064 ret = -EAGAIN;
8065 goto set_swap_failed;
8066 }
8067
541ae68f
K
8068 /* Verify if the Herc works on the slot its placed into */
8069 if (sp->device_type & XFRAME_II_DEVICE) {
8070 mode = s2io_verify_pci_mode(sp);
8071 if (mode < 0) {
9e39f7c5
JP
8072 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
8073 __func__);
541ae68f
K
8074 ret = -EBADSLT;
8075 goto set_swap_failed;
8076 }
8077 }
8078
f61e0a35
SH
8079 if (sp->config.intr_type == MSI_X) {
8080 sp->num_entries = config->rx_ring_num + 1;
8081 ret = s2io_enable_msi_x(sp);
8082
8083 if (!ret) {
8084 ret = s2io_test_msi(sp);
8085 /* rollback MSI-X, will re-enable during add_isr() */
8086 remove_msix_isr(sp);
8087 }
8088 if (ret) {
8089
8090 DBG_PRINT(ERR_DBG,
9e39f7c5 8091 "MSI-X requested but failed to enable\n");
f61e0a35
SH
8092 sp->config.intr_type = INTA;
8093 }
8094 }
8095
8096 if (config->intr_type == MSI_X) {
13d866a9
JP
8097 for (i = 0; i < config->rx_ring_num ; i++) {
8098 struct ring_info *ring = &mac_control->rings[i];
8099
8100 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8101 }
f61e0a35
SH
8102 } else {
8103 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8104 }
8105
541ae68f
K
8106 /* Not needed for Herc */
8107 if (sp->device_type & XFRAME_I_DEVICE) {
8108 /*
8109 * Fix for all "FFs" MAC address problems observed on
8110 * Alpha platforms
8111 */
8112 fix_mac_address(sp);
8113 s2io_reset(sp);
8114 }
1da177e4
LT
8115
8116 /*
1da177e4
LT
8117 * MAC address initialization.
8118 * For now only one mac address will be read and used.
8119 */
8120 bar0 = sp->bar0;
8121 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 8122 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 8123 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 8124 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
8125 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8126 S2IO_BIT_RESET);
1da177e4 8127 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 8128 mac_down = (u32)tmp64;
1da177e4
LT
8129 mac_up = (u32) (tmp64 >> 32);
8130
1da177e4
LT
8131 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8132 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8133 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8134 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8135 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8136 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8137
1da177e4
LT
8138 /* Set the factory defined MAC address initially */
8139 dev->addr_len = ETH_ALEN;
8140 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
2fd37688 8141 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
1da177e4 8142
faa4f796
SH
8143 /* initialize number of multicast & unicast MAC entries variables */
8144 if (sp->device_type == XFRAME_I_DEVICE) {
8145 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8146 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8147 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8148 } else if (sp->device_type == XFRAME_II_DEVICE) {
8149 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8150 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8151 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8152 }
8153
8154 /* store mac addresses from CAM to s2io_nic structure */
8155 do_s2io_store_unicast_mc(sp);
8156
f61e0a35
SH
8157 /* Configure MSIX vector for number of rings configured plus one */
8158 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 8159 (config->intr_type == MSI_X))
f61e0a35
SH
8160 sp->num_entries = config->rx_ring_num + 1;
8161
d44570e4 8162 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 8163 store_xmsi_data(sp);
b41477f3
AR
8164 /* reset Nic and bring it to known state */
8165 s2io_reset(sp);
8166
1da177e4 8167 /*
99993af6 8168 * Initialize link state flags
541ae68f 8169 * and the card state parameter
1da177e4 8170 */
92b84437 8171 sp->state = 0;
1da177e4 8172
1da177e4 8173 /* Initialize spinlocks */
13d866a9
JP
8174 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8175 struct fifo_info *fifo = &mac_control->fifos[i];
8176
8177 spin_lock_init(&fifo->tx_lock);
8178 }
db874e65 8179
20346722
K
8180 /*
8181 * SXE-002: Configure link and activity LED to init state
8182 * on driver load.
1da177e4
LT
8183 */
8184 subid = sp->pdev->subsystem_device;
8185 if ((subid & 0xFF) >= 0x07) {
8186 val64 = readq(&bar0->gpio_control);
8187 val64 |= 0x0000800000000000ULL;
8188 writeq(val64, &bar0->gpio_control);
8189 val64 = 0x0411040400000000ULL;
d44570e4 8190 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8191 val64 = readq(&bar0->gpio_control);
8192 }
8193
8194 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8195
8196 if (register_netdev(dev)) {
8197 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8198 ret = -ENODEV;
8199 goto register_failed;
8200 }
9dc737a7 8201 s2io_vpd_read(sp);
926bd900 8202 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8203 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8204 sp->product_name, pdev->revision);
b41477f3
AR
8205 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8206 s2io_driver_version);
9e39f7c5
JP
8207 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8208 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8209 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8210 mode = s2io_print_pci_mode(sp);
541ae68f 8211 if (mode < 0) {
541ae68f 8212 ret = -EBADSLT;
9dc737a7 8213 unregister_netdev(dev);
541ae68f
K
8214 goto set_swap_failed;
8215 }
541ae68f 8216 }
d44570e4
JP
8217 switch (sp->rxd_mode) {
8218 case RXD_MODE_1:
8219 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8220 dev->name);
8221 break;
8222 case RXD_MODE_3B:
8223 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8224 dev->name);
8225 break;
9dc737a7 8226 }
db874e65 8227
f61e0a35
SH
8228 switch (sp->config.napi) {
8229 case 0:
8230 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8231 break;
8232 case 1:
db874e65 8233 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8234 break;
8235 }
3a3d5756
SH
8236
8237 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8238 sp->config.tx_fifo_num);
3a3d5756 8239
0425b46a
SH
8240 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8241 sp->config.rx_ring_num);
8242
d44570e4
JP
8243 switch (sp->config.intr_type) {
8244 case INTA:
8245 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8246 break;
8247 case MSI_X:
8248 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8249 break;
9dc737a7 8250 }
3a3d5756 8251 if (sp->config.multiq) {
13d866a9
JP
8252 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8253 struct fifo_info *fifo = &mac_control->fifos[i];
8254
8255 fifo->multiq = config->multiq;
8256 }
3a3d5756 8257 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8258 dev->name);
3a3d5756
SH
8259 } else
8260 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8261 dev->name);
3a3d5756 8262
6cfc482b
SH
8263 switch (sp->config.tx_steering_type) {
8264 case NO_STEERING:
d44570e4
JP
8265 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8266 dev->name);
8267 break;
6cfc482b 8268 case TX_PRIORITY_STEERING:
d44570e4
JP
8269 DBG_PRINT(ERR_DBG,
8270 "%s: Priority steering enabled for transmit\n",
8271 dev->name);
6cfc482b
SH
8272 break;
8273 case TX_DEFAULT_STEERING:
d44570e4
JP
8274 DBG_PRINT(ERR_DBG,
8275 "%s: Default steering enabled for transmit\n",
8276 dev->name);
6cfc482b
SH
8277 }
8278
f0c54ace
AW
8279 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8280 dev->name);
db874e65 8281 if (ufo)
d44570e4
JP
8282 DBG_PRINT(ERR_DBG,
8283 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8284 dev->name);
7ba013ac 8285 /* Initialize device name */
9dc737a7 8286 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 8287
cd0fce03
BL
8288 if (vlan_tag_strip)
8289 sp->vlan_strip_flag = 1;
8290 else
8291 sp->vlan_strip_flag = 0;
8292
20346722
K
8293 /*
8294 * Make Link state as off at this point, when the Link change
8295 * interrupt comes the state will be automatically changed to
1da177e4
LT
8296 * the right state.
8297 */
8298 netif_carrier_off(dev);
1da177e4
LT
8299
8300 return 0;
8301
d44570e4
JP
8302register_failed:
8303set_swap_failed:
1da177e4 8304 iounmap(sp->bar1);
d44570e4 8305bar1_remap_failed:
1da177e4 8306 iounmap(sp->bar0);
d44570e4
JP
8307bar0_remap_failed:
8308mem_alloc_failed:
1da177e4
LT
8309 free_shared_mem(sp);
8310 pci_disable_device(pdev);
eccb8628 8311 pci_release_regions(pdev);
1da177e4
LT
8312 pci_set_drvdata(pdev, NULL);
8313 free_netdev(dev);
8314
8315 return ret;
8316}
8317
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Make sure no deferred work items are still running or queued
	 * before the structures they touch are torn down. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	/* Detach from the network stack first; no new I/O after this. */
	unregister_netdev(dev);

	/* Release resources roughly in reverse order of acquisition. */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8352
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

static int __init s2io_starter(void)
{
	/* Registering the driver triggers probing of matching PCI devices. */
	return pci_register_driver(&s2io_driver);
}
8363
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8377
6aa20a22 8378static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8379 struct tcphdr **tcp, struct RxD_t *rxdp,
8380 struct s2io_nic *sp)
7d3d0439
RA
8381{
8382 int ip_off;
8383 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8384
8385 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8386 DBG_PRINT(INIT_DBG,
8387 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8388 __func__);
7d3d0439
RA
8389 return -1;
8390 }
8391
cdb5bf02 8392 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8393 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8394 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8395 /*
8396 * If vlan stripping is disabled and the frame is VLAN tagged,
8397 * shift the offset by the VLAN header size bytes.
8398 */
cd0fce03 8399 if ((!sp->vlan_strip_flag) &&
d44570e4 8400 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8401 ip_off += HEADER_VLAN_SIZE;
8402 } else {
7d3d0439 8403 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8404 return -1;
7d3d0439
RA
8405 }
8406
8407 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8408 ip_len = (u8)((*ip)->ihl);
8409 ip_len <<= 2;
8410 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8411
8412 return 0;
8413}
8414
1ee6dd77 8415static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8416 struct tcphdr *tcp)
8417{
d44570e4
JP
8418 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8419 if ((lro->iph->saddr != ip->saddr) ||
8420 (lro->iph->daddr != ip->daddr) ||
8421 (lro->tcph->source != tcp->source) ||
8422 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8423 return -1;
8424 return 0;
8425}
8426
8427static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8428{
d44570e4 8429 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8430}
8431
1ee6dd77 8432static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8433 struct iphdr *ip, struct tcphdr *tcp,
8434 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8435{
d44570e4 8436 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8437 lro->l2h = l2h;
8438 lro->iph = ip;
8439 lro->tcph = tcp;
8440 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8441 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8442 lro->sg_num = 1;
8443 lro->total_len = ntohs(ip->tot_len);
8444 lro->frags_len = 0;
cdb5bf02 8445 lro->vlan_tag = vlan_tag;
6aa20a22 8446 /*
d44570e4
JP
8447 * Check if we saw TCP timestamp.
8448 * Other consistency checks have already been done.
8449 */
7d3d0439 8450 if (tcp->doff == 8) {
c8855953
SR
8451 __be32 *ptr;
8452 ptr = (__be32 *)(tcp+1);
7d3d0439 8453 lro->saw_ts = 1;
c8855953 8454 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8455 lro->cur_tsecr = *(ptr+2);
8456 }
8457 lro->in_use = 1;
8458}
8459
1ee6dd77 8460static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8461{
8462 struct iphdr *ip = lro->iph;
8463 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 8464 __sum16 nchk;
ffb5df6c
JP
8465 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8466
d44570e4 8467 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8468
8469 /* Update L3 header */
8470 ip->tot_len = htons(lro->total_len);
8471 ip->check = 0;
8472 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8473 ip->check = nchk;
8474
8475 /* Update L4 header */
8476 tcp->ack_seq = lro->tcp_ack;
8477 tcp->window = lro->window;
8478
8479 /* Update tsecr field if this session has timestamps enabled */
8480 if (lro->saw_ts) {
c8855953 8481 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8482 *(ptr+2) = lro->cur_tsecr;
8483 }
8484
8485 /* Update counters required for calculation of
8486 * average no. of packets aggregated.
8487 */
ffb5df6c
JP
8488 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8489 swstats->num_aggregations++;
7d3d0439
RA
8490}
8491
1ee6dd77 8492static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8493 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8494{
d44570e4 8495 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8496 lro->total_len += l4_pyld;
8497 lro->frags_len += l4_pyld;
8498 lro->tcp_next_seq += l4_pyld;
8499 lro->sg_num++;
8500
8501 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8502 lro->tcp_ack = tcp->ack_seq;
8503 lro->window = tcp->window;
6aa20a22 8504
7d3d0439 8505 if (lro->saw_ts) {
c8855953 8506 __be32 *ptr;
7d3d0439 8507 /* Update tsecr and tsval from this packet */
c8855953
SR
8508 ptr = (__be32 *)(tcp+1);
8509 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8510 lro->cur_tsecr = *(ptr + 2);
8511 }
8512}
8513
/*
 * verify_l3_l4_lro_capable - decide whether a TCP segment may take part
 * in LRO aggregation.
 * @l_lro: existing session to validate timestamps against, or NULL when
 *         considering the first segment of a would-be new session.
 * @ip/@tcp: headers of the incoming segment.
 * @tcp_pyld_len: TCP payload length of the segment.
 * Returns 0 if the segment is aggregatable, -1 otherwise.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip any leading NOP padding before the timestamp option. */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8571
d44570e4
JP
/*
 * s2io_club_tcp_session - try to merge a received TCP segment into one of
 * the per-ring LRO sessions.
 * @ring_data: ring owning the lro0_n session array.
 * @buffer: start of the received frame.
 * @tcp/@tcp_len: out-parameters for the TCP header position and payload
 *                length of this segment.
 * @lro: out-parameter for the session the segment belongs to (or NULL).
 * @rxdp: receive descriptor of the frame.
 * @sp: device private structure.
 *
 * Return codes (as produced below; callers act on them):
 *  -1: frame is not LRO capable at L2 (from check_L2_lro_capable)
 *   0: all LRO sessions already in use (*lro set to NULL)
 *   1: segment aggregated into an existing session
 *   2: out-of-sequence or unmergeable follow-up; session header updated
 *      for flushing
 *   3: new session initiated with this segment
 *   4: aggregated and the session reached lro_max_aggr_per_sess; flush it
 *   5: segment is not L3/L4 aggregatable; send it up unmerged
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out for non-LRO-capable frames. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for a new flow. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Act on the decision made above. */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8669
1ee6dd77 8670static void clear_lro_session(struct lro *lro)
7d3d0439 8671{
1ee6dd77 8672 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8673
8674 memset(lro, 0, lro_struct_size);
8675}
8676
cdb5bf02 8677static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8678{
8679 struct net_device *dev = skb->dev;
4cf1653a 8680 struct s2io_nic *sp = netdev_priv(dev);
7d3d0439
RA
8681
8682 skb->protocol = eth_type_trans(skb, dev);
d44570e4 8683 if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
cdb5bf02
SH
8684 /* Queueing the vlan frame to the upper layer */
8685 if (sp->config.napi)
8686 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8687 else
8688 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8689 } else {
8690 if (sp->config.napi)
8691 netif_receive_skb(skb);
8692 else
8693 netif_rx(skb);
8694 }
7d3d0439
RA
8695}
8696
/*
 * lro_append_pkt - append the TCP payload of @skb to the session's parent
 * skb via its frag_list chain.
 * @sp: device private structure (for statistics).
 * @lro: session whose parent skb accumulates the payload.
 * @skb: new segment; everything before the payload is stripped.
 * @tcp_len: TCP payload length of @skb.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Grow the parent's length accounting by this segment's payload. */
	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Drop headers so only the TCP payload remains in this skb. */
	skb_pull(skb, (skb->len - tcp_len));
	/* Link onto the frag_list: either extend the chain via the last
	 * appended fragment, or start the chain on the parent. */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
d796fdb7
LV
8714
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Link is dead for good; no point attempting a reset. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
8742
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Reclaim bus mastering and put the adapter in a known state. */
	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8767
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Re-initialize the card; abort resume if that fails. */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Restore the MAC address; undo card_up on failure. */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}