drivers/net/qlcnic/qlcnic_main.c
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
static void qlcnic_vlan_rx_add(struct net_device *, u16);
static void qlcnic_vlan_rx_del(struct net_device *, u16);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}

static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	u8 mac_addr[ETH_ALEN];
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
		return -EIO;

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
				netdev->dev_addr);

	return 0;
}

static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
		return -EOPNOTSUPP;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_fix_features = qlcnic_fix_features,
	.ndo_set_features = qlcnic_set_features,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
	.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.start_firmware = qlcnic_start_firmware
};

static struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

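/*
 * Interrupt setup: qlcnic_setup_intr() below first tries MSI-X via
 * qlcnic_enable_msix(); on a partial grant (pci_enable_msix() returning a
 * positive vector count) the request is retried with the count rounded down
 * to a power of two. If MSI-X cannot be enabled at all, the driver falls
 * back to MSI and finally to the legacy INTx path in
 * qlcnic_enable_msi_legacy().
 */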
static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err = -1;

	adapter->max_sds_rings = 1;
	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {
 enable_msix:
		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return err;
		}
		if (err > 0) {
			num_msix = rounddown_pow_of_two(err);
			if (num_msix)
				goto enable_msix;
		}
	}
	return err;
}

static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			msi_tgt_status[adapter->ahw->pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	legacy_intrp = &legacy_intr[adapter->ahw->pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
		legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
		legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
		ISR_INT_STATE_REG);
	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}

static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	int num_msix;

	if (adapter->msix_supported) {
		num_msix = (num_online_cpus() >=
			QLCNIC_DEF_NUM_STS_DESC_RINGS) ?
			QLCNIC_DEF_NUM_STS_DESC_RINGS :
			QLCNIC_MIN_NUM_RSS_RINGS;
	} else
		num_msix = 1;

	if (!qlcnic_enable_msix(adapter, num_msix))
		return;

	qlcnic_enable_msi_legacy(adapter);
}

static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & QLCNIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->pci_base0 != NULL)
		iounmap(adapter->ahw->pci_base0);
}

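/*
 * qlcnic_init_pci_info() caches the per-function NPAR data (type, port,
 * bandwidth limits) and allocates the eswitch table. It is only called for
 * the management function, from qlcnic_check_eswitch_mode() further down.
 */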
static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int i, ret = 0;
	u8 pfn;

	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
	if (!adapter->npars) {
		ret = -ENOMEM;
		goto err_pci_info;
	}

	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
	if (!adapter->eswitch) {
		ret = -ENOMEM;
		goto err_npars;
	}

	ret = qlcnic_get_pci_info(adapter, pci_info);
	if (ret)
		goto err_eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		pfn = pci_info[i].id;
		/* reject out-of-range function ids before indexing npars[] */
		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
			ret = QL_STATUS_INVALID_PARAM;
			goto err_eswitch;
		}
		adapter->npars[pfn].active = (u8)pci_info[i].active;
		adapter->npars[pfn].type = (u8)pci_info[i].type;
		adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
	}

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	kfree(pci_info);
	return 0;

err_eswitch:
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;
err_npars:
	kfree(adapter->npars);
	adapter->npars = NULL;
err_pci_info:
	kfree(pci_info);

	return ret;
}

static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
	u8 id;
	u32 ref_count;
	int i, ret = 1;
	u32 data = QLCNIC_MGMT_FUNC;
	void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;

	/* If other drivers are not in use set their privilege level */
	ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	ret = qlcnic_api_lock(adapter);
	if (ret)
		goto err_lock;

	if (qlcnic_config_npars) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
			id = i;
			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
				id == adapter->ahw->pci_func)
				continue;
			data |= (qlcnic_config_npars &
					QLC_DEV_SET_DRV(0xf, id));
		}
	} else {
		data = readl(priv_op);
		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
			adapter->ahw->pci_func));
	}
	writel(data, priv_op);
	qlcnic_api_unlock(adapter);
err_lock:
	return ret;
}

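/*
 * qlcnic_check_vf() reads the firmware HAL version, derives the PCI function
 * number from the MSI-X table offset and picks the operations template:
 * non-privileged functions get qlcnic_vf_ops, everything else uses
 * qlcnic_ops.
 */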
static void
qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
	void __iomem *msix_base_addr;
	void __iomem *priv_op;
	u32 func;
	u32 msix_base;
	u32 op_mode, priv_level;

	/* Determine FW API version */
	adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
					QLCNIC_FW_API);

	/* Find PCI function number */
	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
	msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
	msix_base = readl(msix_base_addr);
	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
	adapter->ahw->pci_func = func;

	/* Determine function privilege level */
	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (priv_level == QLCNIC_NON_PRIV_FUNC) {
		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
		dev_info(&adapter->pdev->dev,
			"HAL Version: %d Non Privileged function\n",
			adapter->fw_hal_version);
		adapter->nic_ops = &qlcnic_vf_ops;
	} else
		adapter->nic_ops = &qlcnic_ops;
}

static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {
		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw->pci_base0 = mem_ptr0;
	adapter->ahw->pci_len0 = pci_len0;

	qlcnic_check_vf(adapter);

	adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
			adapter->ahw->pci_func)));

	return 0;
}

static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, found = 0;

	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
		if (qlcnic_boards[i].vendor == pdev->vendor &&
			qlcnic_boards[i].device == pdev->device &&
			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
			sprintf(name, "%pM: %s",
				adapter->mac_addr,
				qlcnic_boards[i].short_name);
			found = 1;
			break;
		}
	}

	if (!found)
		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}

static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	struct pci_dev *pdev = adapter->pdev;

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
			fw_major, fw_minor, fw_build);
	if (adapter->ahw->port_type == QLCNIC_XGBE) {
		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
		} else {
			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
		}

		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;

	} else if (adapter->ahw->port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->max_rds_rings = MAX_RDS_RINGS;
}

static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_info nic_info;

	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
	if (err)
		return err;

	adapter->physical_port = (u8)nic_info.phys_port;
	adapter->switch_mode = nic_info.switch_mode;
	adapter->max_tx_ques = nic_info.max_tx_ques;
	adapter->max_rx_ques = nic_info.max_rx_ques;
	adapter->capabilities = nic_info.capabilities;
	adapter->max_mac_filters = nic_info.max_mac_filters;
	adapter->max_mtu = nic_info.max_mtu;

	if (adapter->capabilities & BIT_6)
		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
	else
		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;

	return err;
}

static void
qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	if (esw_cfg->discard_tagged)
		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	else
		adapter->flags |= QLCNIC_TAGGING_ENABLED;

	if (esw_cfg->vlan_id)
		adapter->pvid = esw_cfg->vlan_id;
	else
		adapter->pvid = 0;
}

static void
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	set_bit(vid, adapter->vlans);
}

static void
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
	clear_bit(vid, adapter->vlans);
}

static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
				QLCNIC_PROMISC_DISABLED);

	if (esw_cfg->mac_anti_spoof)
		adapter->flags |= QLCNIC_MACSPOOF;

	if (!esw_cfg->mac_override)
		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;

	if (!esw_cfg->promisc_mode)
		adapter->flags |= QLCNIC_PROMISC_DISABLED;

	qlcnic_set_netdev_features(adapter, esw_cfg);
}

static int
qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
		return 0;

	esw_cfg.pci_func = adapter->ahw->pci_func;
	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
		return -EIO;
	qlcnic_set_vlan_config(adapter, &esw_cfg);
	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);

	return 0;
}

static void
qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long features, vlan_features;

	features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		features |= (NETIF_F_TSO | NETIF_F_TSO6);
		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (netdev->features & NETIF_F_LRO)
		features |= NETIF_F_LRO;

	if (esw_cfg->offload_flags & BIT_0) {
		netdev->features |= features;
		if (!(esw_cfg->offload_flags & BIT_1))
			netdev->features &= ~NETIF_F_TSO;
		if (!(esw_cfg->offload_flags & BIT_2))
			netdev->features &= ~NETIF_F_TSO6;
	} else {
		netdev->features &= ~features;
	}

	netdev->vlan_features = (features & vlan_features);
}

static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
	void __iomem *priv_op;
	u32 op_mode, priv_level;
	int err = 0;

	err = qlcnic_initialize_nic(adapter);
	if (err)
		return err;

	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
		return 0;

	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);

	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
		if (priv_level == QLCNIC_MGMT_FUNC) {
			adapter->op_mode = QLCNIC_MGMT_FUNC;
			err = qlcnic_init_pci_info(adapter);
			if (err)
				return err;
			/* Set privilege level for other functions */
			qlcnic_set_function_modes(adapter);
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Management function\n",
				adapter->fw_hal_version);
		} else if (priv_level == QLCNIC_PRIV_FUNC) {
			adapter->op_mode = QLCNIC_PRIV_FUNC;
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Privileged function\n",
				adapter->fw_hal_version);
		}
	}

	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;

	return err;
}

static int
qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	struct qlcnic_npar_info *npar;
	u8 i;

	if (adapter->need_fw_reset)
		return 0;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
			continue;
		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
		esw_cfg.pci_func = i;
		esw_cfg.offload_flags = BIT_0;
		esw_cfg.mac_override = BIT_0;
		esw_cfg.promisc_mode = BIT_0;
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
		if (qlcnic_config_switch_port(adapter, &esw_cfg))
			return -EIO;
		npar = &adapter->npars[i];
		npar->pvid = esw_cfg.vlan_id;
		npar->mac_override = esw_cfg.mac_override;
		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
		npar->discard_tagged = esw_cfg.discard_tagged;
		npar->promisc_mode = esw_cfg.promisc_mode;
		npar->offload_flags = esw_cfg.offload_flags;
	}

	return 0;
}

static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
			struct qlcnic_npar_info *npar, int pci_func)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
	esw_cfg.pci_func = pci_func;
	esw_cfg.vlan_id = npar->pvid;
	esw_cfg.mac_override = npar->mac_override;
	esw_cfg.discard_tagged = npar->discard_tagged;
	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
	esw_cfg.offload_flags = npar->offload_flags;
	esw_cfg.promisc_mode = npar->promisc_mode;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	return 0;
}

static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
	int i, err;
	struct qlcnic_npar_info *npar;
	struct qlcnic_info nic_info;

	if (!adapter->need_fw_reset)
		return 0;

	/* Set the NPAR config data after FW reset */
	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		npar = &adapter->npars[i];
		if (npar->type != QLCNIC_TYPE_NIC)
			continue;
		err = qlcnic_get_nic_info(adapter, &nic_info, i);
		if (err)
			return err;
		nic_info.min_tx_bw = npar->min_bw;
		nic_info.max_tx_bw = npar->max_bw;
		err = qlcnic_set_nic_info(adapter, &nic_info);
		if (err)
			return err;

		if (npar->enable_pm) {
			err = qlcnic_config_port_mirroring(adapter,
					npar->dest_npar, 1, i);
			if (err)
				return err;
		}
		err = qlcnic_reset_eswitch_config(adapter, npar, i);
		if (err)
			return err;
	}
	return 0;
}

static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
{
	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
	u32 npar_state;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		return 0;

	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
		msleep(1000);
		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	}
	if (!npar_opt_timeo) {
		dev_err(&adapter->pdev->dev,
			"Waiting for NPAR state to become operational timed out\n");
		return -EIO;
	}
	return 0;
}

static int
qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
{
	int err;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
		adapter->op_mode != QLCNIC_MGMT_FUNC)
		return 0;

	err = qlcnic_set_default_offload_settings(adapter);
	if (err)
		return err;

	err = qlcnic_reset_npar_config(adapter);
	if (err)
		return err;

	qlcnic_dev_set_npar_ready(adapter);

	return err;
}

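/*
 * Firmware bring-up path. qlcnic_start_firmware() decides whether this
 * function is allowed to load firmware, loads it from flash or from a file
 * (see the load_fw_file module parameter), waits for the firmware to become
 * ready and then performs the eswitch/NPAR initialization. On any failure
 * the device state is set to QLCNIC_DEV_FAILED.
 */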
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_can_start_firmware(adapter);
	if (err < 0)
		return err;
	else if (!err)
		goto check_fw_status;

	if (load_fw_file)
		qlcnic_request_firmware(adapter);
	else {
		err = qlcnic_check_flash_fw_ver(adapter);
		if (err)
			goto err_out;

		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
	}

	err = qlcnic_need_fw_reset(adapter);
	if (err == 0)
		goto check_fw_status;

	err = qlcnic_pinit_from_rom(adapter);
	if (err)
		goto err_out;

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);
	QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);

check_fw_status:
	err = qlcnic_check_fw_status(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
	qlcnic_idc_debug_info(adapter, 1);

	err = qlcnic_check_eswitch_mode(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failed for eswitch\n");
		goto err_out;
	}
	err = qlcnic_set_mgmt_operations(adapter);
	if (err)
		goto err_out;

	qlcnic_check_options(adapter);
	adapter->need_fw_reset = 0;

	qlcnic_release_firmware(adapter);
	return 0;

err_out:
	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
	dev_err(&adapter->pdev->dev, "Device state set to failed\n");

	qlcnic_release_firmware(adapter);
	return err;
}

static int
qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
	irq_handler_t handler;
	struct qlcnic_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		handler = qlcnic_tmp_intr;
		if (!QLCNIC_IS_MSI_FAMILY(adapter))
			flags |= IRQF_SHARED;

	} else {
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			handler = qlcnic_msix_intr;
		else if (adapter->flags & QLCNIC_MSI_ENABLED)
			handler = qlcnic_msi_intr;
		else {
			flags |= IRQF_SHARED;
			handler = qlcnic_intr;
		}
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
			flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

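/*
 * __qlcnic_up()/__qlcnic_down() bring the data path up and down. The
 * rtnl-protected qlcnic_up()/qlcnic_down() wrappers are used from the
 * suspend/resume and firmware-recovery paths, where the netdev may or may
 * not be running.
 */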
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
		return 0;
	if (qlcnic_set_eswitch_port_config(adapter))
		return -EIO;

	if (qlcnic_fw_create_ctx(adapter))
		return -EIO;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw->linkup = 0;

	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	adapter->reset_context = 0;
	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}

/* Usage: during resume and firmware recovery. */

static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	if (adapter->fhash.fnum)
		qlcnic_delete_lb_filters(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_reset_rx_buffers_list(adapter);
	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Usage: during suspend and firmware recovery. */

static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();
}

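/*
 * qlcnic_attach()/qlcnic_detach() allocate and free all software and
 * hardware resources (NAPI contexts, rings, IRQs, sysfs entries). Attach is
 * idempotent: it returns early once adapter->is_up has been set to
 * QLCNIC_ADAPTER_UP_MAGIC.
 */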
static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		goto err_out_napi_del;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_hw;
	}

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_hw:
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
err_out_napi_del:
	qlcnic_napi_del(adapter);
	return err;
}

static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}

void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	if (qlcnic_attach(adapter))
		goto out;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);
out:
	netif_device_attach(netdev);
}

AC
1312static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1313{
1314 int err = 0;
1315 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1316 GFP_KERNEL);
1317 if (!adapter->ahw) {
1318 dev_err(&adapter->pdev->dev,
1319 "Failed to allocate recv ctx resources for adapter\n");
1320 err = -ENOMEM;
1321 goto err_out;
1322 }
1323 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1324 GFP_KERNEL);
1325 if (!adapter->recv_ctx) {
1326 dev_err(&adapter->pdev->dev,
1327 "Failed to allocate recv ctx resources for adapter\n");
1328 kfree(adapter->ahw);
1329 adapter->ahw = NULL;
1330 err = -ENOMEM;
8816d009 1331 goto err_out;
b1fc6d3c 1332 }
8816d009
AC
1333 /* Initialize interrupt coalesce parameters */
1334 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1335 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1336 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
b1fc6d3c
AC
1337err_out:
1338 return err;
1339}
1340
1341static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1342{
1343 kfree(adapter->recv_ctx);
1344 adapter->recv_ctx = NULL;
1345
1346 kfree(adapter->ahw);
1347 adapter->ahw = NULL;
1348}
1349
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_rds_ring *rds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret) {
		netif_device_attach(netdev);
		return ret;
	}

	ret = qlcnic_fw_create_ctx(adapter);
	if (ret) {
		qlcnic_detach(adapter);
		netif_device_attach(netdev);
		return ret;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, rds_ring);
	}

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx->sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}
	set_bit(__QLCNIC_DEV_UP, &adapter->state);

	return 0;
}

/* Reset context in hardware only */
static int
qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				__qlcnic_up(adapter, netdev);
		}

		netif_device_attach(netdev);
	}

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}

static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev, u8 pci_using_dac)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->mc_enabled = 0;
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->hw_features |= NETIF_F_HIGHDMA;

	netdev->vlan_features = netdev->hw_features;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->hw_features |= NETIF_F_HW_VLAN_TX;
	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->hw_features |= NETIF_F_LRO;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->irq = adapter->msix_entries[0].vector;

	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		*pci_using_dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
			!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		*pci_using_dac = 0;
	else {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return -EIO;
	}

	return 0;
}

static int
qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
{
	adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (adapter->msix_entries)
		return 0;

	dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
	return -ENOMEM;
}

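/*
 * PCI probe path: map BAR 0, detect the function type, start the firmware,
 * set up interrupts and register the net_device. The error labels unwind in
 * the reverse order of these steps.
 */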
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	uint8_t revision_id;
	uint8_t pci_using_dac;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
	if (err)
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);
	pci_enable_pcie_error_reporting(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	if (qlcnic_alloc_adapter_resources(adapter))
		goto err_out_free_netdev;

	adapter->dev_rst_time = jiffies;
	revision_id = pdev->revision;
	adapter->ahw->revision_id = revision_id;

	rwlock_init(&adapter->ahw->crb_lock);
	mutex_init(&adapter->ahw->mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_hw;

	/* This will be reset for mezz cards */
	adapter->portnum = adapter->ahw->pci_func;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

	err = qlcnic_setup_idc_param(adapter);
	if (err)
		goto err_out_iounmap;

	adapter->flags |= QLCNIC_NEED_FLR;

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
		goto err_out_decr_ref;
	}

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw->revision_id);
	}

	qlcnic_clear_stats(adapter);

	err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
	if (err)
		goto err_out_decr_ref;

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw->port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_alloc_lb_filters_mem(adapter);
	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	kfree(adapter->msix_entries);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter, 0);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_hw:
	qlcnic_free_adapter_resources(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	qlcnic_detach(adapter);

	if (adapter->npars != NULL)
		kfree(adapter->npars);
	if (adapter->eswitch != NULL)
		kfree(adapter->eswitch);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_free_lb_filters_mem(adapter);

	qlcnic_teardown_intr(adapter);
	kfree(adapter->msix_entries);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	qlcnic_free_adapter_resources(adapter);
	free_netdev(netdev);
}

static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}

static void qlcnic_shutdown(struct pci_dev *pdev)
{
	if (__qlcnic_shutdown(pdev))
		return;

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;

	retval = __qlcnic_shutdown(pdev);
	if (retval)
		return retval;

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_up(adapter, netdev);
		if (err)
			goto done;

		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}
done:
	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;
}
#endif

static int qlcnic_open(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int err;

	err = qlcnic_attach(adapter);
	if (err)
		return err;

	err = __qlcnic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	qlcnic_detach(adapter);
	return err;
}

/*
 * qlcnic_close - Disables a network interface entry point
 */
static int qlcnic_close(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	__qlcnic_down(adapter, netdev);
	return 0;
}

b5e5492c
AKS
1839static void
1840qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1841{
1842 void *head;
1843 int i;
1844
1845 if (!qlcnic_mac_learn)
1846 return;
1847
1848 spin_lock_init(&adapter->mac_learn_lock);
1849
1850 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1851 GFP_KERNEL);
1852 if (!head)
1853 return;
1854
1855 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1856 adapter->fhash.fhead = (struct hlist_head *)head;
1857
1858 for (i = 0; i < adapter->fhash.fmax; i++)
1859 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1860}
1861
1862static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1863{
1864 if (adapter->fhash.fmax && adapter->fhash.fhead)
1865 kfree(adapter->fhash.fhead);
1866
1867 adapter->fhash.fhead = NULL;
1868 adapter->fhash.fmax = 0;
1869}
1870
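/*
 * Post a MAC-learning request on the Tx ring: a QLCNIC_REQUEST descriptor
 * carrying a MAC_ADD (or MAC_VLAN_ADD when a VLAN id is set) event is queued
 * so that firmware can add, or refresh, the source address in its filter
 * table.
 */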
1871static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1872 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1873{
1874 struct cmd_desc_type0 *hwdesc;
1875 struct qlcnic_nic_req *req;
1876 struct qlcnic_mac_req *mac_req;
7e56cac4 1877 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1878 u32 producer;
1879 u64 word;
1880
1881 producer = tx_ring->producer;
1882 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1883
1884 req = (struct qlcnic_nic_req *)hwdesc;
1885 memset(req, 0, sizeof(struct qlcnic_nic_req));
1886 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1887
1888 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1889 req->req_hdr = cpu_to_le64(word);
1890
1891 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1892 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1893 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1894
7e56cac4
SC
1895 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1896 vlan_req->vlan_id = vlan_id;
03c5d770 1897
b5e5492c 1898 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1899 smp_mb();
b5e5492c
AKS
1900}
1901
1902#define QLCNIC_MAC_HASH(MAC)\
1903 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
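/*
 * The hash appears to fold two 3-bit fields of the packed 64-bit source MAC
 * (bits 16-18 into bits 0-2, bits 40-42 into bits 3-5) to form a 6-bit
 * bucket index, which callers then mask with (QLCNIC_LB_MAX_FILTERS - 1).
 */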
1904
1905static void
1906qlcnic_send_filter(struct qlcnic_adapter *adapter,
1907 struct qlcnic_host_tx_ring *tx_ring,
1908 struct cmd_desc_type0 *first_desc,
1909 struct sk_buff *skb)
1910{
1911 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1912 struct qlcnic_filter *fil, *tmp_fil;
1913 struct hlist_node *tmp_hnode, *n;
1914 struct hlist_head *head;
1915 u64 src_addr = 0;
7e56cac4 1916 __le16 vlan_id = 0;
b5e5492c
AKS
1917 u8 hindex;
1918
1919 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1920 return;
1921
1922 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1923 return;
1924
03c5d770
AKS
1925 /* Only NPAR-capable devices support VLAN-based learning */
1926 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1927 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1928 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1929 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1930 head = &(adapter->fhash.fhead[hindex]);
1931
1932 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1933 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1934 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1935
1936 if (jiffies >
1937 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1938 qlcnic_change_filter(adapter, src_addr, vlan_id,
1939 tx_ring);
b5e5492c
AKS
1940 tmp_fil->ftime = jiffies;
1941 return;
1942 }
1943 }
1944
1945 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1946 if (!fil)
1947 return;
1948
03c5d770 1949 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1950
1951 fil->ftime = jiffies;
03c5d770 1952 fil->vlan_id = vlan_id;
b5e5492c
AKS
1953 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1954 spin_lock(&adapter->mac_learn_lock);
1955 hlist_add_head(&(fil->fnode), head);
1956 adapter->fhash.fnum++;
1957 spin_unlock(&adapter->mac_learn_lock);
1958}
1959
036d61f0
AC
1960static int
1961qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1962 struct cmd_desc_type0 *first_desc,
1963 struct sk_buff *skb)
1964{
036d61f0
AC
1965 u8 opcode = 0, hdr_len = 0;
1966 u16 flags = 0, vlan_tci = 0;
1967 int copied, offset, copy_len;
af19b491
AKS
1968 struct cmd_desc_type0 *hwdesc;
1969 struct vlan_ethhdr *vh;
036d61f0
AC
1970 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1971 u16 protocol = ntohs(skb->protocol);
2e9d722d 1972 u32 producer = tx_ring->producer;
036d61f0
AC
1973
1974 if (protocol == ETH_P_8021Q) {
1975 vh = (struct vlan_ethhdr *)skb->data;
1976 flags = FLAGS_VLAN_TAGGED;
1977 vlan_tci = vh->h_vlan_TCI;
1978 } else if (vlan_tx_tag_present(skb)) {
1979 flags = FLAGS_VLAN_OOB;
1980 vlan_tci = vlan_tx_tag_get(skb);
1981 }
1982 if (unlikely(adapter->pvid)) {
1983 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1984 return -EIO;
1985 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1986 goto set_flags;
1987
1988 flags = FLAGS_VLAN_OOB;
1989 vlan_tci = adapter->pvid;
1990 }
1991set_flags:
1992 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
1993 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 1994
2e9d722d
AC
1995 if (*(skb->data) & BIT_0) {
1996 flags |= BIT_0;
1997 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1998 }
036d61f0
AC
1999 opcode = TX_ETHER_PKT;
2000 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
2001 skb_shinfo(skb)->gso_size > 0) {
2002
2003 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2004
2005 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2006 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
2007
2008 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2009
2010 /* For LSO, we need to copy the MAC/IP/TCP headers into
2011 * the descriptor ring */
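 /* The headers are spread over spare command descriptors: copying
  * starts at byte offset 2 of the first slot and each following slot
  * holds up to sizeof(struct cmd_desc_type0) bytes, advancing the
  * producer index for every slot used.
  */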
2012 copied = 0;
2013 offset = 2;
2014
2015 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
2016 first_desc->total_hdr_length += VLAN_HLEN;
2017 first_desc->tcp_hdr_offset = VLAN_HLEN;
2018 first_desc->ip_hdr_offset = VLAN_HLEN;
2019 /* Only in case of TSO on vlan device */
2020 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
2021
2022 /* Create a TSO vlan header template for firmware */
2023
2024 hwdesc = &tx_ring->desc_head[producer];
2025 tx_ring->cmd_buf_arr[producer].skb = NULL;
2026
2027 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2028 offset, hdr_len + VLAN_HLEN);
2029
2030 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2031 skb_copy_from_linear_data(skb, vh, 12);
2032 vh->h_vlan_proto = htons(ETH_P_8021Q);
2033 vh->h_vlan_TCI = htons(vlan_tci);
2034
2035 skb_copy_from_linear_data_offset(skb, 12,
2036 (char *)vh + 16, copy_len - 16);
2037
2038 copied = copy_len - VLAN_HLEN;
2039 offset = 0;
2040
2041 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2042 }
2043
036d61f0
AC
2044 while (copied < hdr_len) {
2045
2046 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2047 offset, (hdr_len - copied));
2048
2049 hwdesc = &tx_ring->desc_head[producer];
2050 tx_ring->cmd_buf_arr[producer].skb = NULL;
2051
2052 skb_copy_from_linear_data_offset(skb, copied,
2053 (char *) hwdesc + offset, copy_len);
2054
2055 copied += copy_len;
2056 offset = 0;
2057
2058 producer = get_next_index(producer, tx_ring->num_desc);
2059 }
2060
2061 tx_ring->producer = producer;
2062 smp_mb();
2063 adapter->stats.lso_frames++;
af19b491
AKS
2064
2065 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2066 u8 l4proto;
2067
036d61f0 2068 if (protocol == ETH_P_IP) {
af19b491
AKS
2069 l4proto = ip_hdr(skb)->protocol;
2070
2071 if (l4proto == IPPROTO_TCP)
2072 opcode = TX_TCP_PKT;
2073 else if (l4proto == IPPROTO_UDP)
2074 opcode = TX_UDP_PKT;
036d61f0 2075 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2076 l4proto = ipv6_hdr(skb)->nexthdr;
2077
2078 if (l4proto == IPPROTO_TCP)
2079 opcode = TX_TCPV6_PKT;
2080 else if (l4proto == IPPROTO_UDP)
2081 opcode = TX_UDPV6_PKT;
2082 }
2083 }
af19b491
AKS
2084 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2085 first_desc->ip_hdr_offset += skb_network_offset(skb);
2086 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2087
036d61f0 2088 return 0;
af19b491
AKS
2089}
2090
2091static int
2092qlcnic_map_tx_skb(struct pci_dev *pdev,
2093 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2094{
2095 struct qlcnic_skb_frag *nf;
2096 struct skb_frag_struct *frag;
2097 int i, nr_frags;
2098 dma_addr_t map;
2099
2100 nr_frags = skb_shinfo(skb)->nr_frags;
2101 nf = &pbuf->frag_array[0];
2102
2103 map = pci_map_single(pdev, skb->data,
2104 skb_headlen(skb), PCI_DMA_TODEVICE);
2105 if (pci_dma_mapping_error(pdev, map))
2106 goto out_err;
2107
2108 nf->dma = map;
2109 nf->length = skb_headlen(skb);
2110
2111 for (i = 0; i < nr_frags; i++) {
2112 frag = &skb_shinfo(skb)->frags[i];
2113 nf = &pbuf->frag_array[i+1];
2114
2115 map = pci_map_page(pdev, frag->page, frag->page_offset,
2116 frag->size, PCI_DMA_TODEVICE);
2117 if (pci_dma_mapping_error(pdev, map))
2118 goto unwind;
2119
2120 nf->dma = map;
2121 nf->length = frag->size;
2122 }
2123
2124 return 0;
2125
2126unwind:
2127 while (--i >= 0) {
2128 nf = &pbuf->frag_array[i+1];
2129 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2130 }
2131
2132 nf = &pbuf->frag_array[0];
2133 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2134
2135out_err:
2136 return -ENOMEM;
2137}
2138
036d61f0
AC
2139static void
2140qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2141 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2142{
036d61f0
AC
2143 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2144 int nr_frags = skb_shinfo(skb)->nr_frags;
2145 int i;
8cf61f89 2146
036d61f0
AC
2147 for (i = 0; i < nr_frags; i++) {
2148 nf = &pbuf->frag_array[i+1];
2149 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2150 }
8cf61f89 2151
036d61f0
AC
2152 nf = &pbuf->frag_array[0];
2153 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
8cf61f89
AKS
2154}
2155
af19b491
AKS
2156static inline void
2157qlcnic_clear_cmddesc(u64 *desc)
2158{
2159 desc[0] = 0ULL;
2160 desc[2] = 0ULL;
8cf61f89 2161 desc[7] = 0ULL;
af19b491
AKS
2162}
2163
cdaff185 2164netdev_tx_t
af19b491
AKS
2165qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2166{
2167 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2168 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2169 struct qlcnic_cmd_buffer *pbuf;
2170 struct qlcnic_skb_frag *buffrag;
2171 struct cmd_desc_type0 *hwdesc, *first_desc;
2172 struct pci_dev *pdev;
dcb50aff 2173 struct ethhdr *phdr;
91a403ca 2174 int delta = 0;
af19b491
AKS
2175 int i, k;
2176
2177 u32 producer;
036d61f0 2178 int frag_count;
af19b491
AKS
2179 u32 num_txd = tx_ring->num_desc;
2180
780ab790
AKS
2181 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2182 netif_stop_queue(netdev);
2183 return NETDEV_TX_BUSY;
2184 }
2185
fe4d434d 2186 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2187 phdr = (struct ethhdr *)skb->data;
2188 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2189 adapter->mac_addr))
2190 goto drop_packet;
2191 }
2192
af19b491 2193 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2194 /* 14 frags supported for normal packet and
2195 * 32 frags supported for TSO packet
2196 */
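 /* Non-TSO skbs with more fragments than QLCNIC_MAX_FRAGS_PER_TX are
  * partially linearized: __pskb_pull_tail() moves the excess fragment
  * bytes into the linear area so the count drops back within the
  * supported limit before the buffers are mapped.
  */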
2197 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2198
2199 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2200 delta += skb_shinfo(skb)->frags[i].size;
2201
2202 if (!__pskb_pull_tail(skb, delta))
2203 goto drop_packet;
2204
2205 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2206 }
af19b491 2207
ef71ff83 2208 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2209 netif_stop_queue(netdev);
ef71ff83
RB
2210 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2211 netif_start_queue(netdev);
2212 else {
2213 adapter->stats.xmit_off++;
2214 return NETDEV_TX_BUSY;
2215 }
af19b491
AKS
2216 }
2217
2218 producer = tx_ring->producer;
2219 pbuf = &tx_ring->cmd_buf_arr[producer];
2220
2221 pdev = adapter->pdev;
2222
8cf61f89
AKS
2223 first_desc = hwdesc = &tx_ring->desc_head[producer];
2224 qlcnic_clear_cmddesc((u64 *)hwdesc);
2225
8ae6df97
AKS
2226 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2227 adapter->stats.tx_dma_map_error++;
af19b491 2228 goto drop_packet;
8ae6df97 2229 }
af19b491
AKS
2230
2231 pbuf->skb = skb;
2232 pbuf->frag_count = frag_count;
2233
af19b491
AKS
2234 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2235 qlcnic_set_tx_port(first_desc, adapter->portnum);
2236
2237 for (i = 0; i < frag_count; i++) {
2238
2239 k = i % 4;
2240
2241 if ((k == 0) && (i > 0)) {
2242 /* move to next desc. */
2243 producer = get_next_index(producer, num_txd);
2244 hwdesc = &tx_ring->desc_head[producer];
2245 qlcnic_clear_cmddesc((u64 *)hwdesc);
2246 tx_ring->cmd_buf_arr[producer].skb = NULL;
2247 }
2248
2249 buffrag = &pbuf->frag_array[i];
2250
2251 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2252 switch (k) {
2253 case 0:
2254 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2255 break;
2256 case 1:
2257 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2258 break;
2259 case 2:
2260 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2261 break;
2262 case 3:
2263 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2264 break;
2265 }
2266 }
2267
2268 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2269 smp_mb();
af19b491 2270
036d61f0
AC
2271 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2272 goto unwind_buff;
af19b491 2273
b5e5492c
AKS
2274 if (qlcnic_mac_learn)
2275 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2276
af19b491
AKS
2277 qlcnic_update_cmd_producer(adapter, tx_ring);
2278
2279 adapter->stats.txbytes += skb->len;
2280 adapter->stats.xmitcalled++;
2281
2282 return NETDEV_TX_OK;
2283
036d61f0
AC
2284unwind_buff:
2285 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2286drop_packet:
2287 adapter->stats.txdropped++;
2288 dev_kfree_skb_any(skb);
2289 return NETDEV_TX_OK;
2290}
2291
2292static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2293{
2294 struct net_device *netdev = adapter->netdev;
2295 u32 temp, temp_state, temp_val;
2296 int rv = 0;
2297
2298 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2299
2300 temp_state = qlcnic_get_temp_state(temp);
2301 temp_val = qlcnic_get_temp_val(temp);
2302
2303 if (temp_state == QLCNIC_TEMP_PANIC) {
2304 dev_err(&netdev->dev,
2305 "Device temperature %d degrees C exceeds"
2306 " maximum allowed. Hardware has been shut down.\n",
2307 temp_val);
2308 rv = 1;
2309 } else if (temp_state == QLCNIC_TEMP_WARN) {
2310 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2311 dev_err(&netdev->dev,
2312 "Device temperature %d degrees C "
2313 "exceeds operating range."
2314 " Immediate action needed.\n",
2315 temp_val);
2316 }
2317 } else {
2318 if (adapter->temp == QLCNIC_TEMP_WARN) {
2319 dev_info(&netdev->dev,
2320 "Device temperature is now %d degrees C"
2321 " in normal range.\n", temp_val);
2322 }
2323 }
2324 adapter->temp = temp_state;
2325 return rv;
2326}
2327
2328void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2329{
2330 struct net_device *netdev = adapter->netdev;
2331
b1fc6d3c 2332 if (adapter->ahw->linkup && !linkup) {
69324275 2333 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2334 adapter->ahw->linkup = 0;
af19b491
AKS
2335 if (netif_running(netdev)) {
2336 netif_carrier_off(netdev);
2337 netif_stop_queue(netdev);
2338 }
b1fc6d3c 2339 } else if (!adapter->ahw->linkup && linkup) {
69324275 2340 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2341 adapter->ahw->linkup = 1;
af19b491
AKS
2342 if (netif_running(netdev)) {
2343 netif_carrier_on(netdev);
2344 netif_wake_queue(netdev);
2345 }
2346 }
2347}
2348
2349static void qlcnic_tx_timeout(struct net_device *netdev)
2350{
2351 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2352
2353 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2354 return;
2355
2356 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2357
2358 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2359 adapter->need_fw_reset = 1;
2360 else
2361 adapter->reset_context = 1;
af19b491
AKS
2362}
2363
2364static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2365{
2366 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2367 struct net_device_stats *stats = &netdev->stats;
2368
af19b491
AKS
2369 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2370 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2371 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2372 stats->tx_bytes = adapter->stats.txbytes;
2373 stats->rx_dropped = adapter->stats.rxdropped;
2374 stats->tx_dropped = adapter->stats.txdropped;
2375
2376 return stats;
2377}
2378
7eb9855d 2379static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2380{
af19b491
AKS
2381 u32 status;
2382
2383 status = readl(adapter->isr_int_vec);
2384
2385 if (!(status & adapter->int_vec_bit))
2386 return IRQ_NONE;
2387
2388 /* check interrupt state machine, to be sure */
2389 status = readl(adapter->crb_int_state_reg);
2390 if (!ISR_LEGACY_INT_TRIGGERED(status))
2391 return IRQ_NONE;
2392
2393 writel(0xffffffff, adapter->tgt_status_reg);
2394 /* read twice to ensure write is flushed */
2395 readl(adapter->isr_int_vec);
2396 readl(adapter->isr_int_vec);
2397
7eb9855d
AKS
2398 return IRQ_HANDLED;
2399}
2400
2401static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2402{
2403 struct qlcnic_host_sds_ring *sds_ring = data;
2404 struct qlcnic_adapter *adapter = sds_ring->adapter;
2405
2406 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2407 goto done;
2408 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2409 writel(0xffffffff, adapter->tgt_status_reg);
2410 goto done;
2411 }
2412
2413 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2414 return IRQ_NONE;
2415
2416done:
2417 adapter->diag_cnt++;
2418 qlcnic_enable_int(sds_ring);
2419 return IRQ_HANDLED;
2420}
2421
2422static irqreturn_t qlcnic_intr(int irq, void *data)
2423{
2424 struct qlcnic_host_sds_ring *sds_ring = data;
2425 struct qlcnic_adapter *adapter = sds_ring->adapter;
2426
2427 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2428 return IRQ_NONE;
2429
af19b491
AKS
2430 napi_schedule(&sds_ring->napi);
2431
2432 return IRQ_HANDLED;
2433}
2434
2435static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2436{
2437 struct qlcnic_host_sds_ring *sds_ring = data;
2438 struct qlcnic_adapter *adapter = sds_ring->adapter;
2439
2440 /* clear interrupt */
2441 writel(0xffffffff, adapter->tgt_status_reg);
2442
2443 napi_schedule(&sds_ring->napi);
2444 return IRQ_HANDLED;
2445}
2446
2447static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2448{
2449 struct qlcnic_host_sds_ring *sds_ring = data;
2450
2451 napi_schedule(&sds_ring->napi);
2452 return IRQ_HANDLED;
2453}
2454
2455static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2456{
2457 u32 sw_consumer, hw_consumer;
2458 int count = 0, i;
2459 struct qlcnic_cmd_buffer *buffer;
2460 struct pci_dev *pdev = adapter->pdev;
2461 struct net_device *netdev = adapter->netdev;
2462 struct qlcnic_skb_frag *frag;
2463 int done;
2464 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2465
2466 if (!spin_trylock(&adapter->tx_clean_lock))
2467 return 1;
2468
2469 sw_consumer = tx_ring->sw_consumer;
2470 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2471
2472 while (sw_consumer != hw_consumer) {
2473 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2474 if (buffer->skb) {
2475 frag = &buffer->frag_array[0];
2476 pci_unmap_single(pdev, frag->dma, frag->length,
2477 PCI_DMA_TODEVICE);
2478 frag->dma = 0ULL;
2479 for (i = 1; i < buffer->frag_count; i++) {
2480 frag++;
2481 pci_unmap_page(pdev, frag->dma, frag->length,
2482 PCI_DMA_TODEVICE);
2483 frag->dma = 0ULL;
2484 }
2485
2486 adapter->stats.xmitfinished++;
2487 dev_kfree_skb_any(buffer->skb);
2488 buffer->skb = NULL;
2489 }
2490
2491 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2492 if (++count >= MAX_STATUS_HANDLE)
2493 break;
2494 }
2495
2496 if (count && netif_running(netdev)) {
2497 tx_ring->sw_consumer = sw_consumer;
2498
2499 smp_mb();
2500
2501 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2502 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2503 netif_wake_queue(netdev);
8bfe8b91 2504 adapter->stats.xmit_on++;
af19b491 2505 }
af19b491 2506 }
ef71ff83 2507 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2508 }
2509 /*
2510 * If everything is freed up to the consumer then check if the ring is full.
2511 * If the ring is full then check if more needs to be freed and
2512 * schedule the callback again.
2513 *
2514 * This happens when there are 2 CPUs: one could be freeing and the
2515 * other filling it. If the ring is full when we get out of here and
2516 * the card has already interrupted the host, then the host can miss the
2517 * interrupt.
2518 *
2519 * There is still a possible race condition and the host could miss an
2520 * interrupt. The card has to take care of this.
2521 */
2522 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2523 done = (sw_consumer == hw_consumer);
2524 spin_unlock(&adapter->tx_clean_lock);
2525
2526 return done;
2527}
2528
2529static int qlcnic_poll(struct napi_struct *napi, int budget)
2530{
2531 struct qlcnic_host_sds_ring *sds_ring =
2532 container_of(napi, struct qlcnic_host_sds_ring, napi);
2533
2534 struct qlcnic_adapter *adapter = sds_ring->adapter;
2535
2536 int tx_complete;
2537 int work_done;
2538
2539 tx_complete = qlcnic_process_cmd_ring(adapter);
2540
2541 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2542
2543 if ((work_done < budget) && tx_complete) {
2544 napi_complete(&sds_ring->napi);
2545 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2546 qlcnic_enable_int(sds_ring);
2547 }
2548
2549 return work_done;
2550}
2551
8f891387 2552static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2553{
2554 struct qlcnic_host_sds_ring *sds_ring =
2555 container_of(napi, struct qlcnic_host_sds_ring, napi);
2556
2557 struct qlcnic_adapter *adapter = sds_ring->adapter;
2558 int work_done;
2559
2560 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2561
2562 if (work_done < budget) {
2563 napi_complete(&sds_ring->napi);
2564 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2565 qlcnic_enable_int(sds_ring);
2566 }
2567
2568 return work_done;
2569}
2570
af19b491
AKS
2571#ifdef CONFIG_NET_POLL_CONTROLLER
2572static void qlcnic_poll_controller(struct net_device *netdev)
2573{
bf82791e
YL
2574 int ring;
2575 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2576 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2577 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2578
af19b491 2579 disable_irq(adapter->irq);
bf82791e
YL
2580 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2581 sds_ring = &recv_ctx->sds_rings[ring];
2582 qlcnic_intr(adapter->irq, sds_ring);
2583 }
af19b491
AKS
2584 enable_irq(adapter->irq);
2585}
2586#endif
2587
6df900e9
SC
2588static void
2589qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2590{
2591 u32 val;
2592
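 /* Pack the debug info into the scratch register: bits 0-3 hold the
  * port number, bit 7 the encoding flag and bits 8 and above the
  * jiffies elapsed since the previous reset event.
  */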
2593 val = adapter->portnum & 0xf;
2594 val |= encoding << 7;
2595 val |= (jiffies - adapter->dev_rst_time) << 8;
2596
2597 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2598 adapter->dev_rst_time = jiffies;
2599}
2600
ade91f8e
AKS
2601static int
2602qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2603{
2604 u32 val;
2605
2606 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2607 state != QLCNIC_DEV_NEED_QUISCENT);
2608
2609 if (qlcnic_api_lock(adapter))
ade91f8e 2610 return -EIO;
af19b491
AKS
2611
2612 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2613
2614 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2615 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2616 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2617 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2618
2619 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2620
2621 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2622
2623 return 0;
af19b491
AKS
2624}
2625
1b95a839
AKS
2626static int
2627qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2628{
2629 u32 val;
2630
2631 if (qlcnic_api_lock(adapter))
2632 return -EBUSY;
2633
2634 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2635 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2636 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2637
2638 qlcnic_api_unlock(adapter);
2639
2640 return 0;
2641}
2642
af19b491 2643static void
21854f02 2644qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2645{
2646 u32 val;
2647
2648 if (qlcnic_api_lock(adapter))
2649 goto err;
2650
31018e06 2651 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2652 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2653 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2654
21854f02
AKS
2655 if (failed) {
2656 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2657 dev_info(&adapter->pdev->dev,
2658 "Device state set to Failed. Please Reboot\n");
2659 } else if (!(val & 0x11111111))
af19b491
AKS
2660 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2661
2662 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2663 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2664 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2665
2666 qlcnic_api_unlock(adapter);
2667err:
2668 adapter->fw_fail_cnt = 0;
2669 clear_bit(__QLCNIC_START_FW, &adapter->state);
2670 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2671}
2672
f73dfc50 2673/* Grab api lock, before checking state */
af19b491
AKS
2674static int
2675qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2676{
2677 int act, state;
2678
2679 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2680 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2681
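 /* Each PCI function appears to own one nibble in DRV_STATE/DRV_ACTIVE
  * (see QLC_DEV_SET_RST_RDY): the reset may proceed once every active
  * function has set its reset-ready bit, or its quiescent-ready bit,
  * which sits one position higher, hence the state >> 1 comparison.
  */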
2682 if (((state & 0x11111111) == (act & 0x11111111)) ||
2683 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2684 return 0;
2685 else
2686 return 1;
2687}
2688
96f8118c
SC
2689static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2690{
2691 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2692
2693 if (val != QLCNIC_DRV_IDC_VER) {
2694 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2695 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2696 }
2697
2698 return 0;
2699}
2700
af19b491
AKS
2701static int
2702qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2703{
2704 u32 val, prev_state;
aa5e18c0 2705 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2706 u8 portnum = adapter->portnum;
96f8118c 2707 u8 ret;
af19b491 2708
f73dfc50
AKS
2709 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2710 return 1;
2711
af19b491
AKS
2712 if (qlcnic_api_lock(adapter))
2713 return -1;
2714
31018e06 2715 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2716 if (!(val & (1 << (portnum * 4)))) {
2717 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2718 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2719 }
2720
2721 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2722 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2723
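 /* IDC handshake: COLD means this function loads firmware and publishes
  * the IDC version; READY only needs a version check; NEED_RESET and
  * NEED_QUISCENT require acking in DRV_STATE and then waiting below for
  * the device to come back to READY.
  */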
2724 switch (prev_state) {
2725 case QLCNIC_DEV_COLD:
bbd8c6a4 2726 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2727 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2728 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2729 qlcnic_api_unlock(adapter);
2730 return 1;
2731
2732 case QLCNIC_DEV_READY:
96f8118c 2733 ret = qlcnic_check_idc_ver(adapter);
af19b491 2734 qlcnic_api_unlock(adapter);
96f8118c 2735 return ret;
af19b491
AKS
2736
2737 case QLCNIC_DEV_NEED_RESET:
2738 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2739 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2740 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2741 break;
2742
2743 case QLCNIC_DEV_NEED_QUISCENT:
2744 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2745 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2746 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2747 break;
2748
2749 case QLCNIC_DEV_FAILED:
a7fc948f 2750 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2751 qlcnic_api_unlock(adapter);
2752 return -1;
bbd8c6a4
AKS
2753
2754 case QLCNIC_DEV_INITIALIZING:
2755 case QLCNIC_DEV_QUISCENT:
2756 break;
af19b491
AKS
2757 }
2758
2759 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2760
2761 do {
af19b491 2762 msleep(1000);
a5e463d0
SC
2763 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2764
2765 if (prev_state == QLCNIC_DEV_QUISCENT)
2766 continue;
2767 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2768
65b5b420
AKS
2769 if (!dev_init_timeo) {
2770 dev_err(&adapter->pdev->dev,
2771 "Waiting for device to initialize timeout\n");
af19b491 2772 return -1;
65b5b420 2773 }
af19b491
AKS
2774
2775 if (qlcnic_api_lock(adapter))
2776 return -1;
2777
2778 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2779 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2780 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2781
96f8118c 2782 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2783 qlcnic_api_unlock(adapter);
2784
96f8118c 2785 return ret;
af19b491
AKS
2786}
2787
2788static void
2789qlcnic_fwinit_work(struct work_struct *work)
2790{
2791 struct qlcnic_adapter *adapter = container_of(work,
2792 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2793 u32 dev_state = 0xf;
af19b491 2794
f73dfc50
AKS
2795 if (qlcnic_api_lock(adapter))
2796 goto err_ret;
af19b491 2797
a5e463d0 2798 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2799 if (dev_state == QLCNIC_DEV_QUISCENT ||
2800 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2801 qlcnic_api_unlock(adapter);
2802 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2803 FW_POLL_DELAY * 2);
2804 return;
2805 }
2806
9f26f547 2807 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2808 qlcnic_api_unlock(adapter);
2809 goto wait_npar;
9f26f547
AC
2810 }
2811
f73dfc50
AKS
2812 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2813 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2814 adapter->reset_ack_timeo);
2815 goto skip_ack_check;
2816 }
2817
2818 if (!qlcnic_check_drv_state(adapter)) {
2819skip_ack_check:
2820 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2821
f73dfc50
AKS
2822 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2823 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2824 QLCNIC_DEV_INITIALIZING);
2825 set_bit(__QLCNIC_START_FW, &adapter->state);
2826 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2827 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2828 }
2829
f73dfc50
AKS
2830 qlcnic_api_unlock(adapter);
2831
9f26f547 2832 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2833 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2834 adapter->fw_wait_cnt = 0;
af19b491
AKS
2835 return;
2836 }
af19b491
AKS
2837 goto err_ret;
2838 }
2839
f73dfc50 2840 qlcnic_api_unlock(adapter);
aa5e18c0 2841
9f26f547 2842wait_npar:
af19b491 2843 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2844 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2845
af19b491 2846 switch (dev_state) {
3c4b23b1 2847 case QLCNIC_DEV_READY:
9f26f547 2848 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2849 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2850 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2851 return;
2852 }
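 /* start_firmware failed: fall through and treat it as DEV_FAILED */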
3c4b23b1
AKS
2853 case QLCNIC_DEV_FAILED:
2854 break;
2855 default:
2856 qlcnic_schedule_work(adapter,
2857 qlcnic_fwinit_work, FW_POLL_DELAY);
2858 return;
af19b491
AKS
2859 }
2860
2861err_ret:
f73dfc50
AKS
2862 dev_err(&adapter->pdev->dev, "Fwinit work failed, state=%u "
2863 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2864 netif_device_attach(adapter->netdev);
21854f02 2865 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2866}
2867
2868static void
2869qlcnic_detach_work(struct work_struct *work)
2870{
2871 struct qlcnic_adapter *adapter = container_of(work,
2872 struct qlcnic_adapter, fw_work.work);
2873 struct net_device *netdev = adapter->netdev;
2874 u32 status;
2875
2876 netif_device_detach(netdev);
2877
b8c17620
AKS
2878 /* Don't grab the rtnl lock during quiescent mode */
2879 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2880 if (netif_running(netdev))
2881 __qlcnic_down(adapter, netdev);
2882 } else
2883 qlcnic_down(adapter, netdev);
af19b491 2884
af19b491
AKS
2885 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2886
2887 if (status & QLCNIC_RCODE_FATAL_ERROR)
2888 goto err_ret;
2889
2890 if (adapter->temp == QLCNIC_TEMP_PANIC)
2891 goto err_ret;
2892
ade91f8e
AKS
2893 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2894 goto err_ret;
af19b491
AKS
2895
2896 adapter->fw_wait_cnt = 0;
2897
2898 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2899
2900 return;
2901
2902err_ret:
65b5b420
AKS
2903 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2904 status, adapter->temp);
34ce3626 2905 netif_device_attach(netdev);
21854f02 2906 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2907}
2908
3c4b23b1
AKS
2909 /* Transition NPAR state to non-operational */
2910static void
2911qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2912{
2913 u32 state;
2914
2915 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2916 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2917 return;
2918
2919 if (qlcnic_api_lock(adapter))
2920 return;
2921 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2922 qlcnic_api_unlock(adapter);
2923}
2924
f73dfc50 2925/*Transit to RESET state from READY state only */
af19b491
AKS
2926static void
2927qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2928{
2929 u32 state;
2930
cea8975e 2931 adapter->need_fw_reset = 1;
af19b491
AKS
2932 if (qlcnic_api_lock(adapter))
2933 return;
2934
2935 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2936
f73dfc50 2937 if (state == QLCNIC_DEV_READY) {
af19b491 2938 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2939 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2940 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2941 }
2942
3c4b23b1 2943 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2944 qlcnic_api_unlock(adapter);
2945}
2946
9f26f547
AC
2947 /* Transition to NPAR READY state from NPAR NOT READY state */
2948static void
2949qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2950{
9f26f547
AC
2951 if (qlcnic_api_lock(adapter))
2952 return;
2953
3c4b23b1
AKS
2954 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2955 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2956
2957 qlcnic_api_unlock(adapter);
2958}
2959
af19b491
AKS
2960static void
2961qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2962 work_func_t func, int delay)
2963{
451724c8
SC
2964 if (test_bit(__QLCNIC_AER, &adapter->state))
2965 return;
2966
af19b491 2967 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2968 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2969 round_jiffies_relative(delay));
af19b491
AKS
2970}
2971
2972static void
2973qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2974{
2975 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2976 msleep(10);
2977
2978 cancel_delayed_work_sync(&adapter->fw_work);
2979}
2980
2981static void
2982qlcnic_attach_work(struct work_struct *work)
2983{
2984 struct qlcnic_adapter *adapter = container_of(work,
2985 struct qlcnic_adapter, fw_work.work);
2986 struct net_device *netdev = adapter->netdev;
b18971d1 2987 u32 npar_state;
af19b491 2988
b18971d1
AKS
2989 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2990 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2991 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2992 qlcnic_clr_all_drv_state(adapter, 0);
2993 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2994 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2995 FW_POLL_DELAY);
2996 else
2997 goto attach;
2998 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2999 return;
3000 }
3001attach:
af19b491 3002 if (netif_running(netdev)) {
52486a3a 3003 if (qlcnic_up(adapter, netdev))
af19b491 3004 goto done;
af19b491 3005
aec1e845 3006 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
3007 }
3008
af19b491 3009done:
34ce3626 3010 netif_device_attach(netdev);
af19b491
AKS
3011 adapter->fw_fail_cnt = 0;
3012 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3013
3014 if (!qlcnic_clr_drv_state(adapter))
3015 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3016 FW_POLL_DELAY);
af19b491
AKS
3017}
3018
3019static int
3020qlcnic_check_health(struct qlcnic_adapter *adapter)
3021{
4e70812b 3022 u32 state = 0, heartbeat;
af19b491
AKS
3023 struct net_device *netdev = adapter->netdev;
3024
3025 if (qlcnic_check_temp(adapter))
3026 goto detach;
3027
2372a5f1 3028 if (adapter->need_fw_reset)
af19b491 3029 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3030
3031 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3032 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3033 qlcnic_set_npar_non_operational(adapter);
af19b491 3034 adapter->need_fw_reset = 1;
b8c17620
AKS
3035 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3036 goto detach;
af19b491 3037
4e70812b
SC
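 /* Firmware bumps PEG_ALIVE_COUNTER while it is healthy; if the value
  * stays unchanged for FW_FAIL_THRESH consecutive polls the firmware is
  * considered hung and a reset is requested.
  */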
3038 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3039 if (heartbeat != adapter->heartbeat) {
3040 adapter->heartbeat = heartbeat;
af19b491
AKS
3041 adapter->fw_fail_cnt = 0;
3042 if (adapter->need_fw_reset)
3043 goto detach;
68bf1c68 3044
9ce13ca8 3045 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3046 qlcnic_reset_hw_context(adapter);
3047 adapter->netdev->trans_start = jiffies;
3048 }
3049
af19b491
AKS
3050 return 0;
3051 }
3052
3053 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3054 return 0;
3055
3056 qlcnic_dev_request_reset(adapter);
3057
9ce13ca8 3058 if (auto_fw_reset)
0df170b6 3059 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3060
3061 dev_info(&netdev->dev, "firmware hang detected\n");
3062
3063detach:
3064 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3065 QLCNIC_DEV_NEED_RESET;
3066
9ce13ca8 3067 if (auto_fw_reset &&
65b5b420
AKS
3068 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3069
af19b491 3070 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3071 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3072 }
af19b491
AKS
3073
3074 return 1;
3075}
3076
3077static void
3078qlcnic_fw_poll_work(struct work_struct *work)
3079{
3080 struct qlcnic_adapter *adapter = container_of(work,
3081 struct qlcnic_adapter, fw_work.work);
3082
3083 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3084 goto reschedule;
3085
3086
3087 if (qlcnic_check_health(adapter))
3088 return;
3089
b5e5492c
AKS
3090 if (adapter->fhash.fnum)
3091 qlcnic_prune_lb_filters(adapter);
3092
af19b491
AKS
3093reschedule:
3094 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3095}
3096
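/*
 * Walk the lower-numbered PCI functions in the same slot and report whether
 * any of them is already powered up (not in D3cold). If none is, this
 * function is treated as the first one to come up, and the caller uses that
 * to decide whether firmware needs to be restarted.
 */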
451724c8
SC
3097static int qlcnic_is_first_func(struct pci_dev *pdev)
3098{
3099 struct pci_dev *oth_pdev;
3100 int val = pdev->devfn;
3101
3102 while (val-- > 0) {
3103 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3104 (pdev->bus), pdev->bus->number,
3105 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3106 if (!oth_pdev)
3107 continue;
451724c8 3108
bfc978fa
AKS
3109 if (oth_pdev->current_state != PCI_D3cold) {
3110 pci_dev_put(oth_pdev);
451724c8 3111 return 0;
bfc978fa
AKS
3112 }
3113 pci_dev_put(oth_pdev);
451724c8
SC
3114 }
3115 return 1;
3116}
3117
3118static int qlcnic_attach_func(struct pci_dev *pdev)
3119{
3120 int err, first_func;
3121 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3122 struct net_device *netdev = adapter->netdev;
3123
3124 pdev->error_state = pci_channel_io_normal;
3125
3126 err = pci_enable_device(pdev);
3127 if (err)
3128 return err;
3129
3130 pci_set_power_state(pdev, PCI_D0);
3131 pci_set_master(pdev);
3132 pci_restore_state(pdev);
3133
3134 first_func = qlcnic_is_first_func(pdev);
3135
3136 if (qlcnic_api_lock(adapter))
3137 return -EINVAL;
3138
933fce12 3139 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3140 adapter->need_fw_reset = 1;
3141 set_bit(__QLCNIC_START_FW, &adapter->state);
3142 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3143 QLCDB(adapter, DRV, "Restarting fw\n");
3144 }
3145 qlcnic_api_unlock(adapter);
3146
3147 err = adapter->nic_ops->start_firmware(adapter);
3148 if (err)
3149 return err;
3150
3151 qlcnic_clr_drv_state(adapter);
3152 qlcnic_setup_intr(adapter);
3153
3154 if (netif_running(netdev)) {
3155 err = qlcnic_attach(adapter);
3156 if (err) {
21854f02 3157 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3158 clear_bit(__QLCNIC_AER, &adapter->state);
3159 netif_device_attach(netdev);
3160 return err;
3161 }
3162
3163 err = qlcnic_up(adapter, netdev);
3164 if (err)
3165 goto done;
3166
aec1e845 3167 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3168 }
3169 done:
3170 netif_device_attach(netdev);
3171 return err;
3172}
3173
3174static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3175 pci_channel_state_t state)
3176{
3177 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3178 struct net_device *netdev = adapter->netdev;
3179
3180 if (state == pci_channel_io_perm_failure)
3181 return PCI_ERS_RESULT_DISCONNECT;
3182
3183 if (state == pci_channel_io_normal)
3184 return PCI_ERS_RESULT_RECOVERED;
3185
3186 set_bit(__QLCNIC_AER, &adapter->state);
3187 netif_device_detach(netdev);
3188
3189 cancel_delayed_work_sync(&adapter->fw_work);
3190
3191 if (netif_running(netdev))
3192 qlcnic_down(adapter, netdev);
3193
3194 qlcnic_detach(adapter);
3195 qlcnic_teardown_intr(adapter);
3196
3197 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3198
3199 pci_save_state(pdev);
3200 pci_disable_device(pdev);
3201
3202 return PCI_ERS_RESULT_NEED_RESET;
3203}
3204
3205static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3206{
3207 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3208 PCI_ERS_RESULT_RECOVERED;
3209}
3210
3211static void qlcnic_io_resume(struct pci_dev *pdev)
3212{
3213 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3214
3215 pci_cleanup_aer_uncorrect_error_status(pdev);
3216
3217 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3218 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3219 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3220 FW_POLL_DELAY);
3221}
3222
87eb743b
AC
3223static int
3224qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3225{
3226 int err;
3227
3228 err = qlcnic_can_start_firmware(adapter);
3229 if (err)
3230 return err;
3231
78f84e1a
AKS
3232 err = qlcnic_check_npar_opertional(adapter);
3233 if (err)
3234 return err;
3c4b23b1 3235
174240a8
RB
3236 err = qlcnic_initialize_nic(adapter);
3237 if (err)
3238 return err;
3239
87eb743b
AC
3240 qlcnic_check_options(adapter);
3241
7373373d
RB
3242 err = qlcnic_set_eswitch_port_config(adapter);
3243 if (err)
3244 return err;
3245
87eb743b
AC
3246 adapter->need_fw_reset = 0;
3247
3248 return err;
3249}
3250
3251static int
3252qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3253{
3254 return -EOPNOTSUPP;
3255}
3256
3257static int
3258qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3259{
3260 return -EOPNOTSUPP;
3261}
3262
af19b491
AKS
3263static ssize_t
3264qlcnic_store_bridged_mode(struct device *dev,
3265 struct device_attribute *attr, const char *buf, size_t len)
3266{
3267 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3268 unsigned long new;
3269 int ret = -EINVAL;
3270
3271 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3272 goto err_out;
3273
8a15ad1f 3274 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3275 goto err_out;
3276
3277 if (strict_strtoul(buf, 2, &new))
3278 goto err_out;
3279
2e9d722d 3280 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3281 ret = len;
3282
3283err_out:
3284 return ret;
3285}
3286
3287static ssize_t
3288qlcnic_show_bridged_mode(struct device *dev,
3289 struct device_attribute *attr, char *buf)
3290{
3291 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3292 int bridged_mode = 0;
3293
3294 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3295 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3296
3297 return sprintf(buf, "%d\n", bridged_mode);
3298}
3299
3300static struct device_attribute dev_attr_bridged_mode = {
3301 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3302 .show = qlcnic_show_bridged_mode,
3303 .store = qlcnic_store_bridged_mode,
3304};
3305
3306static ssize_t
3307qlcnic_store_diag_mode(struct device *dev,
3308 struct device_attribute *attr, const char *buf, size_t len)
3309{
3310 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3311 unsigned long new;
3312
3313 if (strict_strtoul(buf, 2, &new))
3314 return -EINVAL;
3315
3316 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3317 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3318
3319 return len;
3320}
3321
3322static ssize_t
3323qlcnic_show_diag_mode(struct device *dev,
3324 struct device_attribute *attr, char *buf)
3325{
3326 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3327
3328 return sprintf(buf, "%d\n",
3329 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3330}
3331
3332static struct device_attribute dev_attr_diag_mode = {
3333 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3334 .show = qlcnic_show_diag_mode,
3335 .store = qlcnic_store_diag_mode,
3336};
3337
f94bc1e7
SC
3338int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3339{
3340 if (!use_msi_x && !use_msi) {
3341 netdev_info(netdev, "no MSI-X or MSI support, hence no RSS\n");
3342 return -EINVAL;
3343 }
3344
3345 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3346 netdev_info(netdev, "rss_ring valid range [2 - %x] in"
3347 " powers of 2\n", max_hw);
3348 return -EINVAL;
3349 }
3350 return 0;
3351
3352}
3353
3354int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3355{
3356 struct net_device *netdev = adapter->netdev;
3357 int err = 0;
3358
3359 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3360 return -EBUSY;
3361
3362 netif_device_detach(netdev);
3363 if (netif_running(netdev))
3364 __qlcnic_down(adapter, netdev);
3365 qlcnic_detach(adapter);
3366 qlcnic_teardown_intr(adapter);
3367
3368 if (qlcnic_enable_msix(adapter, data)) {
3369 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3370 qlcnic_enable_msi_legacy(adapter);
3371 }
3372
3373 if (netif_running(netdev)) {
3374 err = qlcnic_attach(adapter);
3375 if (err)
3376 goto done;
3377 err = __qlcnic_up(adapter, netdev);
3378 if (err)
3379 goto done;
3380 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3381 }
3382 done:
3383 netif_device_attach(netdev);
3384 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3385 return err;
3386}
3387
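/*
 * Sysfs CRB access rules: the diag interface must be enabled, regular CRB
 * registers are read and written 4 bytes at a time, and offsets below the
 * CRB window are only valid inside the CAMQM range, which takes 8-byte,
 * size-aligned accesses.
 */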
af19b491
AKS
3388static int
3389qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3390 loff_t offset, size_t size)
3391{
897e8c7c
DP
3392 size_t crb_size = 4;
3393
af19b491
AKS
3394 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3395 return -EIO;
3396
897e8c7c
DP
3397 if (offset < QLCNIC_PCI_CRBSPACE) {
3398 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3399 QLCNIC_PCI_CAMQM_END))
3400 crb_size = 8;
3401 else
3402 return -EINVAL;
3403 }
af19b491 3404
897e8c7c
DP
3405 if ((size != crb_size) || (offset & (crb_size-1)))
3406 return -EINVAL;
af19b491
AKS
3407
3408 return 0;
3409}
3410
3411static ssize_t
2c3c8bea
CW
3412qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3413 struct bin_attribute *attr,
af19b491
AKS
3414 char *buf, loff_t offset, size_t size)
3415{
3416 struct device *dev = container_of(kobj, struct device, kobj);
3417 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3418 u32 data;
897e8c7c 3419 u64 qmdata;
af19b491
AKS
3420 int ret;
3421
3422 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3423 if (ret != 0)
3424 return ret;
3425
897e8c7c
DP
3426 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3427 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3428 memcpy(buf, &qmdata, size);
3429 } else {
3430 data = QLCRD32(adapter, offset);
3431 memcpy(buf, &data, size);
3432 }
af19b491
AKS
3433 return size;
3434}
3435
3436static ssize_t
2c3c8bea
CW
3437qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3438 struct bin_attribute *attr,
af19b491
AKS
3439 char *buf, loff_t offset, size_t size)
3440{
3441 struct device *dev = container_of(kobj, struct device, kobj);
3442 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3443 u32 data;
897e8c7c 3444 u64 qmdata;
af19b491
AKS
3445 int ret;
3446
3447 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3448 if (ret != 0)
3449 return ret;
3450
897e8c7c
DP
3451 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3452 memcpy(&qmdata, buf, size);
3453 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3454 } else {
3455 memcpy(&data, buf, size);
3456 QLCWR32(adapter, offset, data);
3457 }
af19b491
AKS
3458 return size;
3459}
3460
3461static int
3462qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3463 loff_t offset, size_t size)
3464{
3465 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3466 return -EIO;
3467
3468 if ((size != 8) || (offset & 0x7))
3469 return -EIO;
3470
3471 return 0;
3472}
3473
3474static ssize_t
2c3c8bea
CW
3475qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3476 struct bin_attribute *attr,
af19b491
AKS
3477 char *buf, loff_t offset, size_t size)
3478{
3479 struct device *dev = container_of(kobj, struct device, kobj);
3480 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3481 u64 data;
3482 int ret;
3483
3484 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3485 if (ret != 0)
3486 return ret;
3487
3488 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3489 return -EIO;
3490
3491 memcpy(buf, &data, size);
3492
3493 return size;
3494}
3495
3496static ssize_t
2c3c8bea
CW
3497qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3498 struct bin_attribute *attr,
af19b491
AKS
3499 char *buf, loff_t offset, size_t size)
3500{
3501 struct device *dev = container_of(kobj, struct device, kobj);
3502 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3503 u64 data;
3504 int ret;
3505
3506 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3507 if (ret != 0)
3508 return ret;
3509
3510 memcpy(&data, buf, size);
3511
3512 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3513 return -EIO;
3514
3515 return size;
3516}
3517
3518
3519static struct bin_attribute bin_attr_crb = {
3520 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3521 .size = 0,
3522 .read = qlcnic_sysfs_read_crb,
3523 .write = qlcnic_sysfs_write_crb,
3524};
3525
3526static struct bin_attribute bin_attr_mem = {
3527 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3528 .size = 0,
3529 .read = qlcnic_sysfs_read_mem,
3530 .write = qlcnic_sysfs_write_mem,
3531};
3532
cea8975e 3533static int
346fe763
RB
3534validate_pm_config(struct qlcnic_adapter *adapter,
3535 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3536{
3537
3538 u8 src_pci_func, s_esw_id, d_esw_id;
3539 u8 dest_pci_func;
3540 int i;
3541
3542 for (i = 0; i < count; i++) {
3543 src_pci_func = pm_cfg[i].pci_func;
3544 dest_pci_func = pm_cfg[i].dest_npar;
3545 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3546 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3547 return QL_STATUS_INVALID_PARAM;
3548
3549 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3550 return QL_STATUS_INVALID_PARAM;
3551
3552 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3553 return QL_STATUS_INVALID_PARAM;
3554
346fe763
RB
3555 s_esw_id = adapter->npars[src_pci_func].phy_port;
3556 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3557
3558 if (s_esw_id != d_esw_id)
3559 return QL_STATUS_INVALID_PARAM;
3560
3561 }
3562 return 0;
3563
3564}
3565
3566static ssize_t
3567qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3568 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3569{
3570 struct device *dev = container_of(kobj, struct device, kobj);
3571 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3572 struct qlcnic_pm_func_cfg *pm_cfg;
3573 u32 id, action, pci_func;
3574 int count, rem, i, ret;
3575
3576 count = size / sizeof(struct qlcnic_pm_func_cfg);
3577 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3578 if (rem)
3579 return QL_STATUS_INVALID_PARAM;
3580
3581 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3582
3583 ret = validate_pm_config(adapter, pm_cfg, count);
3584 if (ret)
3585 return ret;
3586 for (i = 0; i < count; i++) {
3587 pci_func = pm_cfg[i].pci_func;
4e8acb01 3588 action = !!pm_cfg[i].action;
346fe763
RB
3589 id = adapter->npars[pci_func].phy_port;
3590 ret = qlcnic_config_port_mirroring(adapter, id,
3591 action, pci_func);
3592 if (ret)
3593 return ret;
3594 }
3595
3596 for (i = 0; i < count; i++) {
3597 pci_func = pm_cfg[i].pci_func;
3598 id = adapter->npars[pci_func].phy_port;
4e8acb01 3599 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3600 adapter->npars[pci_func].dest_npar = id;
3601 }
3602 return size;
3603}
3604
3605static ssize_t
3606qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3607 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3608{
3609 struct device *dev = container_of(kobj, struct device, kobj);
3610 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3611 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3612 int i;
3613
3614 if (size != sizeof(pm_cfg))
3615 return QL_STATUS_INVALID_PARAM;
3616
3617 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3618 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3619 continue;
3620 pm_cfg[i].action = adapter->npars[i].enable_pm;
3621 pm_cfg[i].dest_npar = 0;
3622 pm_cfg[i].pci_func = i;
3623 }
3624 memcpy(buf, &pm_cfg, size);
3625
3626 return size;
3627}
3628
cea8975e 3629static int
346fe763 3630validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3631 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3632{
7613c87b 3633 u32 op_mode;
346fe763
RB
3634 u8 pci_func;
3635 int i;
7613c87b 3636
b1fc6d3c 3637 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3638
346fe763
RB
3639 for (i = 0; i < count; i++) {
3640 pci_func = esw_cfg[i].pci_func;
3641 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3642 return QL_STATUS_INVALID_PARAM;
3643
4e8acb01
RB
3644 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3645 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3646 return QL_STATUS_INVALID_PARAM;
346fe763 3647
4e8acb01
RB
3648 switch (esw_cfg[i].op_mode) {
3649 case QLCNIC_PORT_DEFAULTS:
7613c87b 3650 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3651 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3652 if (esw_cfg[i].mac_anti_spoof != 0)
3653 return QL_STATUS_INVALID_PARAM;
3654 if (esw_cfg[i].mac_override != 1)
3655 return QL_STATUS_INVALID_PARAM;
3656 if (esw_cfg[i].promisc_mode != 1)
3657 return QL_STATUS_INVALID_PARAM;
7373373d 3658 }
4e8acb01
RB
3659 break;
3660 case QLCNIC_ADD_VLAN:
346fe763
RB
3661 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3662 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3663 if (!esw_cfg[i].op_type)
3664 return QL_STATUS_INVALID_PARAM;
3665 break;
3666 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3667 if (!esw_cfg[i].op_type)
3668 return QL_STATUS_INVALID_PARAM;
3669 break;
3670 default:
346fe763 3671 return QL_STATUS_INVALID_PARAM;
4e8acb01 3672 }
346fe763 3673 }
346fe763
RB
3674 return 0;
3675}
3676
3677static ssize_t
3678qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3679 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3680{
3681 struct device *dev = container_of(kobj, struct device, kobj);
3682 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3683 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3684 struct qlcnic_npar_info *npar;
346fe763 3685 int count, rem, i, ret;
0325d69b 3686 u8 pci_func, op_mode = 0;
346fe763
RB
3687
3688 count = size / sizeof(struct qlcnic_esw_func_cfg);
3689 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3690 if (rem)
3691 return QL_STATUS_INVALID_PARAM;
3692
3693 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3694 ret = validate_esw_config(adapter, esw_cfg, count);
3695 if (ret)
3696 return ret;
3697
3698 for (i = 0; i < count; i++) {
0325d69b
RB
3699 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3700 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3701 return QL_STATUS_INVALID_PARAM;
e9a47700 3702
b1fc6d3c 3703 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3704 continue;
3705
3706 op_mode = esw_cfg[i].op_mode;
3707 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3708 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3709 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3710
3711 switch (esw_cfg[i].op_mode) {
3712 case QLCNIC_PORT_DEFAULTS:
3713 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3714 break;
8cf61f89
AKS
3715 case QLCNIC_ADD_VLAN:
3716 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3717 break;
3718 case QLCNIC_DEL_VLAN:
3719 esw_cfg[i].vlan_id = 0;
3720 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3721 break;
0325d69b 3722 }
346fe763
RB
3723 }
3724
0325d69b
RB
3725 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3726 goto out;
e9a47700 3727
346fe763
RB
3728 for (i = 0; i < count; i++) {
3729 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3730 npar = &adapter->npars[pci_func];
3731 switch (esw_cfg[i].op_mode) {
3732 case QLCNIC_PORT_DEFAULTS:
3733 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3734 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3735 npar->offload_flags = esw_cfg[i].offload_flags;
3736 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3737 npar->discard_tagged = esw_cfg[i].discard_tagged;
3738 break;
3739 case QLCNIC_ADD_VLAN:
3740 npar->pvid = esw_cfg[i].vlan_id;
3741 break;
3742 case QLCNIC_DEL_VLAN:
3743 npar->pvid = 0;
3744 break;
3745 }
346fe763 3746 }
0325d69b 3747out:
346fe763
RB
3748 return size;
3749}
3750
3751static ssize_t
3752qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3753 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3754{
3755 struct device *dev = container_of(kobj, struct device, kobj);
3756 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3757 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3758 u8 i;
346fe763
RB
3759
3760 if (size != sizeof(esw_cfg))
3761 return QL_STATUS_INVALID_PARAM;
3762
3763 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3764 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3765 continue;
4e8acb01
RB
3766 esw_cfg[i].pci_func = i;
3767 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3768 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3769 }
3770 memcpy(buf, &esw_cfg, size);
3771
3772 return size;
3773}
3774
cea8975e 3775static int
346fe763
RB
3776validate_npar_config(struct qlcnic_adapter *adapter,
3777 struct qlcnic_npar_func_cfg *np_cfg, int count)
3778{
3779 u8 pci_func, i;
3780
3781 for (i = 0; i < count; i++) {
3782 pci_func = np_cfg[i].pci_func;
3783 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3784 return QL_STATUS_INVALID_PARAM;
3785
3786 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3787 return QL_STATUS_INVALID_PARAM;
3788
d12b0d9a
RB
3789 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3790 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3791 return QL_STATUS_INVALID_PARAM;
3792 }
3793 return 0;
3794}
3795
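/*
 * "npar_config" write handler: each record selects a NIC function and its
 * minimum/maximum TX bandwidth.  The values are checked with IS_VALID_BW(),
 * programmed through qlcnic_set_nic_info() and mirrored in the local npar
 * table.
 */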
3796static ssize_t
3797qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3798 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3799{
3800 struct device *dev = container_of(kobj, struct device, kobj);
3801 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3802 struct qlcnic_info nic_info;
3803 struct qlcnic_npar_func_cfg *np_cfg;
3804 int i, count, rem, ret;
3805 u8 pci_func;
3806
3807 count = size / sizeof(struct qlcnic_npar_func_cfg);
3808 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3809 if (rem)
3810 return QL_STATUS_INVALID_PARAM;
3811
3812 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3813 ret = validate_npar_config(adapter, np_cfg, count);
3814 if (ret)
3815 return ret;
3816
 3817 for (i = 0; i < count; i++) {
3818 pci_func = np_cfg[i].pci_func;
3819 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3820 if (ret)
3821 return ret;
3822 nic_info.pci_func = pci_func;
3823 nic_info.min_tx_bw = np_cfg[i].min_bw;
3824 nic_info.max_tx_bw = np_cfg[i].max_bw;
3825 ret = qlcnic_set_nic_info(adapter, &nic_info);
3826 if (ret)
3827 return ret;
cea8975e
AC
 3828 adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
 3829 adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
346fe763
RB
3830 }
3831
 3832 return size;
 3833 }
 3834
 3835 static ssize_t
3836qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3837 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3838{
3839 struct device *dev = container_of(kobj, struct device, kobj);
3840 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3841 struct qlcnic_info nic_info;
3842 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3843 int i, ret;
3844
3845 if (size != sizeof(np_cfg))
3846 return QL_STATUS_INVALID_PARAM;
3847
 3848 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3849 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3850 continue;
3851 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3852 if (ret)
3853 return ret;
3854
3855 np_cfg[i].pci_func = i;
a1c0c459 3856 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3857 np_cfg[i].port_num = nic_info.phys_port;
3858 np_cfg[i].fw_capab = nic_info.capabilities;
 3859 np_cfg[i].min_bw = nic_info.min_tx_bw;
3860 np_cfg[i].max_bw = nic_info.max_tx_bw;
3861 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3862 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3863 }
3864 memcpy(buf, &np_cfg, size);
3865 return size;
3866}
3867
b6021212
AKS
3868static ssize_t
3869qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3870 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3871{
3872 struct device *dev = container_of(kobj, struct device, kobj);
3873 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3874 struct qlcnic_esw_statistics port_stats;
3875 int ret;
3876
3877 if (size != sizeof(struct qlcnic_esw_statistics))
3878 return QL_STATUS_INVALID_PARAM;
3879
3880 if (offset >= QLCNIC_MAX_PCI_FUNC)
3881 return QL_STATUS_INVALID_PARAM;
3882
3883 memset(&port_stats, 0, size);
3884 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3885 &port_stats.rx);
3886 if (ret)
3887 return ret;
3888
3889 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3890 &port_stats.tx);
3891 if (ret)
3892 return ret;
3893
3894 memcpy(buf, &port_stats, size);
3895 return size;
3896}
3897
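/*
 * Illustrative userspace sketch (not part of the driver): reading the
 * per-function statistics exported by the handler above.  The handler
 * requires the read length to equal sizeof(struct qlcnic_esw_statistics)
 * and treats the file offset as the PCI function number, so the caller
 * pread()s exactly that many bytes at offset <func>.  The sysfs path in
 * the usage comment and the record size passed on the command line are
 * assumptions; the authoritative size comes from the driver's headers.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 4) {
        /* e.g. ./read_port_stats /sys/bus/pci/devices/0000:03:00.0/port_stats 0 <size> */
        fprintf(stderr, "usage: %s <port_stats attr> <func> <record size>\n", argv[0]);
        return 1;
    }

    long func = strtol(argv[2], NULL, 0);
    size_t len = strtoul(argv[3], NULL, 0);
    unsigned char *buf = malloc(len);
    int fd = open(argv[1], O_RDONLY);

    if (!buf || fd < 0) {
        perror("setup");
        return 1;
    }

    /* offset selects the function; length must match the kernel structure exactly */
    if (pread(fd, buf, len, func) != (ssize_t)len) {
        perror("pread");
        return 1;
    }

    for (size_t i = 0; i < len; i++)
        printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
    putchar('\n');

    close(fd);
    free(buf);
    return 0;
}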
3898static ssize_t
3899qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3900 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3901{
3902 struct device *dev = container_of(kobj, struct device, kobj);
3903 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3904 struct qlcnic_esw_statistics esw_stats;
3905 int ret;
3906
3907 if (size != sizeof(struct qlcnic_esw_statistics))
3908 return QL_STATUS_INVALID_PARAM;
3909
3910 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3911 return QL_STATUS_INVALID_PARAM;
3912
3913 memset(&esw_stats, 0, size);
3914 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3915 &esw_stats.rx);
3916 if (ret)
3917 return ret;
3918
3919 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3920 &esw_stats.tx);
3921 if (ret)
3922 return ret;
3923
3924 memcpy(buf, &esw_stats, size);
3925 return size;
3926}
3927
3928static ssize_t
3929qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3930 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3931{
3932 struct device *dev = container_of(kobj, struct device, kobj);
3933 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3934 int ret;
3935
3936 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3937 return QL_STATUS_INVALID_PARAM;
3938
3939 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3940 QLCNIC_QUERY_RX_COUNTER);
3941 if (ret)
3942 return ret;
3943
3944 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3945 QLCNIC_QUERY_TX_COUNTER);
3946 if (ret)
3947 return ret;
3948
3949 return size;
3950}
3951
3952static ssize_t
3953qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3954 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3955{
3956
3957 struct device *dev = container_of(kobj, struct device, kobj);
3958 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3959 int ret;
3960
3961 if (offset >= QLCNIC_MAX_PCI_FUNC)
3962 return QL_STATUS_INVALID_PARAM;
3963
3964 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3965 QLCNIC_QUERY_RX_COUNTER);
3966 if (ret)
3967 return ret;
3968
3969 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3970 QLCNIC_QUERY_TX_COUNTER);
3971 if (ret)
3972 return ret;
3973
3974 return size;
3975}
3976
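/*
 * Illustrative userspace sketch (not part of the driver): clearing one
 * function's port counters.  qlcnic_sysfs_clear_port_stats() above ignores
 * the data written and only uses the file offset to pick the function, so
 * a single byte written at offset <func> is sufficient.  The sysfs path in
 * the usage comment is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 3) {
        /* e.g. ./clear_port_stats /sys/bus/pci/devices/0000:03:00.0/port_stats 0 */
        fprintf(stderr, "usage: %s <port_stats attr> <func>\n", argv[0]);
        return 1;
    }

    long func = strtol(argv[2], NULL, 0);
    char dummy = 0;
    int fd = open(argv[1], O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* the offset names the PCI function whose RX/TX counters are reset */
    if (pwrite(fd, &dummy, sizeof(dummy), func) < 0) {
        perror("pwrite");
        return 1;
    }

    close(fd);
    return 0;
}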
346fe763
RB
3977static ssize_t
3978qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3979 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3980{
3981 struct device *dev = container_of(kobj, struct device, kobj);
3982 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3983 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3984 struct qlcnic_pci_info *pci_info;
346fe763
RB
3985 int i, ret;
3986
3987 if (size != sizeof(pci_cfg))
3988 return QL_STATUS_INVALID_PARAM;
3989
e88db3bd
DC
3990 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3991 if (!pci_info)
3992 return -ENOMEM;
3993
346fe763 3994 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3995 if (ret) {
3996 kfree(pci_info);
346fe763 3997 return ret;
e88db3bd 3998 }
346fe763
RB
3999
 4000 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4001 pci_cfg[i].pci_func = pci_info[i].id;
4002 pci_cfg[i].func_type = pci_info[i].type;
4003 pci_cfg[i].port_num = pci_info[i].default_port;
4004 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4005 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4006 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4007 }
4008 memcpy(buf, &pci_cfg, size);
e88db3bd 4009 kfree(pci_info);
346fe763 4010 return size;
346fe763
RB
4011}
4012static struct bin_attribute bin_attr_npar_config = {
4013 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4014 .size = 0,
4015 .read = qlcnic_sysfs_read_npar_config,
4016 .write = qlcnic_sysfs_write_npar_config,
4017};
4018
4019static struct bin_attribute bin_attr_pci_config = {
4020 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4021 .size = 0,
4022 .read = qlcnic_sysfs_read_pci_config,
4023 .write = NULL,
4024};
4025
b6021212
AKS
4026static struct bin_attribute bin_attr_port_stats = {
4027 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4028 .size = 0,
4029 .read = qlcnic_sysfs_get_port_stats,
4030 .write = qlcnic_sysfs_clear_port_stats,
4031};
4032
4033static struct bin_attribute bin_attr_esw_stats = {
4034 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4035 .size = 0,
4036 .read = qlcnic_sysfs_get_esw_stats,
4037 .write = qlcnic_sysfs_clear_esw_stats,
4038};
4039
346fe763
RB
4040static struct bin_attribute bin_attr_esw_config = {
4041 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4042 .size = 0,
4043 .read = qlcnic_sysfs_read_esw_config,
4044 .write = qlcnic_sysfs_write_esw_config,
4045};
4046
4047static struct bin_attribute bin_attr_pm_config = {
4048 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4049 .size = 0,
4050 .read = qlcnic_sysfs_read_pm_config,
4051 .write = qlcnic_sysfs_write_pm_config,
4052};
4053
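/*
 * All of the bin attributes above are declared with .size = 0, so each
 * handler validates the transfer length and offset itself rather than
 * relying on sysfs to clamp it.  The table-style attributes (pci_config,
 * npar_config, esw_config, pm_config) only accept reads whose length
 * equals the whole QLCNIC_MAX_PCI_FUNC-entry array, while the statistics
 * attributes are indexed by file offset as in the sketches above.  They
 * appear under the adapter's PCI device directory once
 * qlcnic_create_diag_entries() registers them (see below).
 */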
af19b491
AKS
4054static void
4055qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4056{
4057 struct device *dev = &adapter->pdev->dev;
4058
4059 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4060 if (device_create_file(dev, &dev_attr_bridged_mode))
4061 dev_warn(dev,
4062 "failed to create bridged_mode sysfs entry\n");
4063}
4064
4065static void
4066qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4067{
4068 struct device *dev = &adapter->pdev->dev;
4069
4070 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4071 device_remove_file(dev, &dev_attr_bridged_mode);
4072}
4073
4074static void
4075qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4076{
4077 struct device *dev = &adapter->pdev->dev;
4078
b6021212
AKS
4079 if (device_create_bin_file(dev, &bin_attr_port_stats))
 4080 dev_info(dev, "failed to create port stats sysfs entry\n");
4081
132ff00a
AC
4082 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4083 return;
af19b491
AKS
4084 if (device_create_file(dev, &dev_attr_diag_mode))
4085 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4086 if (device_create_bin_file(dev, &bin_attr_crb))
4087 dev_info(dev, "failed to create crb sysfs entry\n");
4088 if (device_create_bin_file(dev, &bin_attr_mem))
4089 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
4090 if (device_create_bin_file(dev, &bin_attr_pci_config))
 4091 dev_info(dev, "failed to create pci config sysfs entry\n");
4e8acb01
RB
4092 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4093 return;
4094 if (device_create_bin_file(dev, &bin_attr_esw_config))
 4095 dev_info(dev, "failed to create esw config sysfs entry\n");
4096 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4097 return;
346fe763
RB
4098 if (device_create_bin_file(dev, &bin_attr_npar_config))
 4099 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
4100 if (device_create_bin_file(dev, &bin_attr_pm_config))
 4101 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
4102 if (device_create_bin_file(dev, &bin_attr_esw_stats))
 4103 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
4104}
4105
af19b491
AKS
4106static void
4107qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4108{
4109 struct device *dev = &adapter->pdev->dev;
4110
b6021212
AKS
4111 device_remove_bin_file(dev, &bin_attr_port_stats);
4112
132ff00a
AC
4113 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4114 return;
af19b491
AKS
4115 device_remove_file(dev, &dev_attr_diag_mode);
4116 device_remove_bin_file(dev, &bin_attr_crb);
4117 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4118 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
4119 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4120 return;
4121 device_remove_bin_file(dev, &bin_attr_esw_config);
4122 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4123 return;
346fe763 4124 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4125 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4126 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4127}
4128
4129#ifdef CONFIG_INET
4130
4131#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4132
af19b491 4133static void
aec1e845
AKS
4134qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4135 struct net_device *dev, unsigned long event)
af19b491
AKS
4136{
4137 struct in_device *indev;
af19b491 4138
af19b491
AKS
4139 indev = in_dev_get(dev);
4140 if (!indev)
4141 return;
4142
4143 for_ifa(indev) {
4144 switch (event) {
4145 case NETDEV_UP:
4146 qlcnic_config_ipaddr(adapter,
4147 ifa->ifa_address, QLCNIC_IP_UP);
4148 break;
4149 case NETDEV_DOWN:
4150 qlcnic_config_ipaddr(adapter,
4151 ifa->ifa_address, QLCNIC_IP_DOWN);
4152 break;
4153 default:
4154 break;
4155 }
4156 } endfor_ifa(indev);
4157
4158 in_dev_put(indev);
af19b491
AKS
4159}
4160
aec1e845
AKS
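/*
 * Walk the base netdev and every VLAN device stacked on it (tracked in
 * adapter->vlans) and re-issue the QLCNIC_IP_UP/QLCNIC_IP_DOWN programming
 * for each of their IPv4 addresses.
 */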
4161static void
4162qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4163{
4164 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4165 struct net_device *dev;
4166 u16 vid;
4167
4168 qlcnic_config_indev_addr(adapter, netdev, event);
4169
b9796a14
AC
4170 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4171 dev = vlan_find_dev(netdev, vid);
aec1e845
AKS
4172 if (!dev)
4173 continue;
aec1e845
AKS
4174 qlcnic_config_indev_addr(adapter, dev, event);
4175 }
4176}
4177
af19b491
AKS
4178static int qlcnic_netdev_event(struct notifier_block *this,
4179 unsigned long event, void *ptr)
4180{
4181 struct qlcnic_adapter *adapter;
4182 struct net_device *dev = (struct net_device *)ptr;
4183
4184recheck:
4185 if (dev == NULL)
4186 goto done;
4187
4188 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4189 dev = vlan_dev_real_dev(dev);
4190 goto recheck;
4191 }
4192
4193 if (!is_qlcnic_netdev(dev))
4194 goto done;
4195
4196 adapter = netdev_priv(dev);
4197
4198 if (!adapter)
4199 goto done;
4200
8a15ad1f 4201 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4202 goto done;
4203
aec1e845 4204 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4205done:
4206 return NOTIFY_DONE;
4207}
4208
4209static int
4210qlcnic_inetaddr_event(struct notifier_block *this,
4211 unsigned long event, void *ptr)
4212{
4213 struct qlcnic_adapter *adapter;
4214 struct net_device *dev;
4215
4216 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4217
4218 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4219
4220recheck:
aec1e845 4221 if (dev == NULL)
af19b491
AKS
4222 goto done;
4223
4224 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4225 dev = vlan_dev_real_dev(dev);
4226 goto recheck;
4227 }
4228
4229 if (!is_qlcnic_netdev(dev))
4230 goto done;
4231
4232 adapter = netdev_priv(dev);
4233
251a84c9 4234 if (!adapter)
af19b491
AKS
4235 goto done;
4236
8a15ad1f 4237 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4238 goto done;
4239
4240 switch (event) {
4241 case NETDEV_UP:
4242 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4243 break;
4244 case NETDEV_DOWN:
4245 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4246 break;
4247 default:
4248 break;
4249 }
4250
4251done:
4252 return NOTIFY_DONE;
4253}
4254
4255static struct notifier_block qlcnic_netdev_cb = {
4256 .notifier_call = qlcnic_netdev_event,
4257};
4258
4259static struct notifier_block qlcnic_inetaddr_cb = {
4260 .notifier_call = qlcnic_inetaddr_event,
4261};
4262#else
4263static void
aec1e845 4264qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4265{ }
4266#endif
451724c8
SC
4267static struct pci_error_handlers qlcnic_err_handler = {
4268 .error_detected = qlcnic_io_error_detected,
4269 .slot_reset = qlcnic_io_slot_reset,
4270 .resume = qlcnic_io_resume,
4271};
af19b491
AKS
4272
4273static struct pci_driver qlcnic_driver = {
4274 .name = qlcnic_driver_name,
4275 .id_table = qlcnic_pci_tbl,
4276 .probe = qlcnic_probe,
4277 .remove = __devexit_p(qlcnic_remove),
4278#ifdef CONFIG_PM
4279 .suspend = qlcnic_suspend,
4280 .resume = qlcnic_resume,
4281#endif
451724c8
SC
4282 .shutdown = qlcnic_shutdown,
4283 .err_handler = &qlcnic_err_handler
4284
af19b491
AKS
4285};
4286
4287static int __init qlcnic_init_module(void)
4288{
0cf3a14c 4289 int ret;
af19b491
AKS
4290
4291 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4292
f7ec804a
AKS
4293 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4294 if (qlcnic_wq == NULL) {
4295 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4296 return -ENOMEM;
4297 }
4298
af19b491
AKS
4299#ifdef CONFIG_INET
4300 register_netdevice_notifier(&qlcnic_netdev_cb);
4301 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4302#endif
4303
0cf3a14c
AKS
4304 ret = pci_register_driver(&qlcnic_driver);
4305 if (ret) {
4306#ifdef CONFIG_INET
4307 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4308 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4309#endif
f7ec804a 4310 destroy_workqueue(qlcnic_wq);
0cf3a14c 4311 }
af19b491 4312
0cf3a14c 4313 return ret;
af19b491
AKS
4314}
4315
4316module_init(qlcnic_init_module);
4317
4318static void __exit qlcnic_exit_module(void)
4319{
4320
4321 pci_unregister_driver(&qlcnic_driver);
4322
4323#ifdef CONFIG_INET
4324 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4325 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4326#endif
f7ec804a 4327 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4328}
4329
4330module_exit(qlcnic_exit_module);