/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);


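/*
 * Tx doorbell helper: publishing the ring's producer index through the
 * CRB command-producer register lets firmware pick up newly queued
 * Tx (command) descriptors.
 */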
116void
117qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
118 struct qlcnic_host_tx_ring *tx_ring)
119{
120 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
af19b491
AKS
121}
122
123static const u32 msi_tgt_status[8] = {
124 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
125 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
126 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
127 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
128};
129
130static const
131struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
132
133static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
134{
135 writel(0, sds_ring->crb_intr_mask);
136}
137
138static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
139{
140 struct qlcnic_adapter *adapter = sds_ring->adapter;
141
142 writel(0x1, sds_ring->crb_intr_mask);
143
144 if (!QLCNIC_IS_MSI_FAMILY(adapter))
145 writel(0xfbff, adapter->tgt_mask_reg);
146}
147
148static int
149qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
150{
151 int size = sizeof(struct qlcnic_host_sds_ring) * count;
152
153 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
154
807540ba 155 return recv_ctx->sds_rings == NULL;
af19b491
AKS
156}
157
158static void
159qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
160{
161 if (recv_ctx->sds_rings != NULL)
162 kfree(recv_ctx->sds_rings);
163
164 recv_ctx->sds_rings = NULL;
165}
166
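/*
 * One NAPI context is registered per SDS (status descriptor) ring.  The
 * last ring is serviced by qlcnic_poll, which also reaps Tx completions,
 * and therefore gets a proportionally smaller weight; the remaining
 * rings are Rx-only and use qlcnic_rx_poll with a larger budget.
 */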
167static int
168qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
169{
170 int ring;
171 struct qlcnic_host_sds_ring *sds_ring;
172 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
173
174 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
175 return -ENOMEM;
176
177 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
178 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 179
180 if (ring == adapter->max_sds_rings - 1)
181 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
182 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
183 else
184 netif_napi_add(netdev, &sds_ring->napi,
185 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
186 }
187
188 return 0;
189}
190
191static void
192qlcnic_napi_del(struct qlcnic_adapter *adapter)
193{
194 int ring;
195 struct qlcnic_host_sds_ring *sds_ring;
196 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
197
198 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
199 sds_ring = &recv_ctx->sds_rings[ring];
200 netif_napi_del(&sds_ring->napi);
201 }
202
203 qlcnic_free_sds_rings(&adapter->recv_ctx);
204}
205
206static void
207qlcnic_napi_enable(struct qlcnic_adapter *adapter)
208{
209 int ring;
210 struct qlcnic_host_sds_ring *sds_ring;
211 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
212
780ab790
AKS
213 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
214 return;
215
af19b491
AKS
216 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
217 sds_ring = &recv_ctx->sds_rings[ring];
218 napi_enable(&sds_ring->napi);
219 qlcnic_enable_int(sds_ring);
220 }
221}
222
223static void
224qlcnic_napi_disable(struct qlcnic_adapter *adapter)
225{
226 int ring;
227 struct qlcnic_host_sds_ring *sds_ring;
228 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
229
780ab790
AKS
230 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
231 return;
232
af19b491
AKS
233 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
234 sds_ring = &recv_ctx->sds_rings[ring];
235 qlcnic_disable_int(sds_ring);
236 napi_synchronize(&sds_ring->napi);
237 napi_disable(&sds_ring->napi);
238 }
239}
240
241static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
242{
243 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
244}
245
af19b491
AKS
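/*
 * Toggle the MSI-X enable bit directly in the PCI MSI-X capability;
 * when disabling, the whole message-control word is cleared rather
 * than just the enable bit.
 */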
246static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
247{
248 u32 control;
249 int pos;
250
251 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
252 if (pos) {
253 pci_read_config_dword(pdev, pos, &control);
254 if (enable)
255 control |= PCI_MSIX_FLAGS_ENABLE;
256 else
257 control = 0;
258 pci_write_config_dword(pdev, pos, control);
259 }
260}
261
262static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
263{
264 int i;
265
266 for (i = 0; i < count; i++)
267 adapter->msix_entries[i].entry = i;
268}
269
270static int
271qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
272{
2e9d722d 273 u8 mac_addr[ETH_ALEN];
af19b491
AKS
274 struct net_device *netdev = adapter->netdev;
275 struct pci_dev *pdev = adapter->pdev;
276
da48e6c3 277 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
278 return -EIO;
279
2e9d722d 280 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
281 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
282 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
283
284 /* set station address */
285
286 if (!is_valid_ether_addr(netdev->perm_addr))
287 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
288 netdev->dev_addr);
289
290 return 0;
291}
292
293static int qlcnic_set_mac(struct net_device *netdev, void *p)
294{
295 struct qlcnic_adapter *adapter = netdev_priv(netdev);
296 struct sockaddr *addr = p;
297
7373373d
RB
298 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
299 return -EOPNOTSUPP;
300
af19b491
AKS
301 if (!is_valid_ether_addr(addr->sa_data))
302 return -EINVAL;
303
8a15ad1f 304 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
305 netif_device_detach(netdev);
306 qlcnic_napi_disable(adapter);
307 }
308
309 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
310 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
311 qlcnic_set_multi(adapter->netdev);
312
8a15ad1f 313 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
314 netif_device_attach(netdev);
315 qlcnic_napi_enable(adapter);
316 }
317 return 0;
318}
319
d5790663
AKS
320static void qlcnic_vlan_rx_register(struct net_device *netdev,
321 struct vlan_group *grp)
322{
323 struct qlcnic_adapter *adapter = netdev_priv(netdev);
324 adapter->vlgrp = grp;
325}
326
af19b491
AKS
327static const struct net_device_ops qlcnic_netdev_ops = {
328 .ndo_open = qlcnic_open,
329 .ndo_stop = qlcnic_close,
330 .ndo_start_xmit = qlcnic_xmit_frame,
331 .ndo_get_stats = qlcnic_get_stats,
332 .ndo_validate_addr = eth_validate_addr,
333 .ndo_set_multicast_list = qlcnic_set_multi,
334 .ndo_set_mac_address = qlcnic_set_mac,
335 .ndo_change_mtu = qlcnic_change_mtu,
336 .ndo_tx_timeout = qlcnic_tx_timeout,
d5790663 337 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
af19b491
AKS
338#ifdef CONFIG_NET_POLL_CONTROLLER
339 .ndo_poll_controller = qlcnic_poll_controller,
340#endif
341};
342
2e9d722d 343static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
344 .config_bridged_mode = qlcnic_config_bridged_mode,
345 .config_led = qlcnic_config_led,
9f26f547
AC
346 .start_firmware = qlcnic_start_firmware
347};
348
349static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
350 .config_bridged_mode = qlcnicvf_config_bridged_mode,
351 .config_led = qlcnicvf_config_led,
9f26f547 352 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
353};
354
af19b491
AKS
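/*
 * Interrupt setup order: try MSI-X first (multiple SDS rings when RSS
 * is supported), fall back to single-vector MSI, and finally to legacy
 * INTx, for which the target status/mask CRB registers are mapped.
 */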
355static void
356qlcnic_setup_intr(struct qlcnic_adapter *adapter)
357{
358 const struct qlcnic_legacy_intr_set *legacy_intrp;
359 struct pci_dev *pdev = adapter->pdev;
360 int err, num_msix;
361
362 if (adapter->rss_supported) {
363 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
364 MSIX_ENTRIES_PER_ADAPTER : 2;
365 } else
366 num_msix = 1;
367
368 adapter->max_sds_rings = 1;
369
370 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
371
372 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
373
374 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
375 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
376 legacy_intrp->tgt_status_reg);
377 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
378 legacy_intrp->tgt_mask_reg);
379 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
380
381 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
382 ISR_INT_STATE_REG);
383
384 qlcnic_set_msix_bit(pdev, 0);
385
386 if (adapter->msix_supported) {
387
388 qlcnic_init_msix_entries(adapter, num_msix);
389 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
390 if (err == 0) {
391 adapter->flags |= QLCNIC_MSIX_ENABLED;
392 qlcnic_set_msix_bit(pdev, 1);
393
394 if (adapter->rss_supported)
395 adapter->max_sds_rings = num_msix;
396
397 dev_info(&pdev->dev, "using msi-x interrupts\n");
398 return;
399 }
400
401 if (err > 0)
402 pci_disable_msix(pdev);
403
404 /* fall through for msi */
405 }
406
407 if (use_msi && !pci_enable_msi(pdev)) {
408 adapter->flags |= QLCNIC_MSI_ENABLED;
409 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
410 msi_tgt_status[adapter->ahw.pci_func]);
411 dev_info(&pdev->dev, "using msi interrupts\n");
412 adapter->msix_entries[0].vector = pdev->irq;
413 return;
414 }
415
416 dev_info(&pdev->dev, "using legacy interrupts\n");
417 adapter->msix_entries[0].vector = pdev->irq;
418}
419
420static void
421qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
422{
423 if (adapter->flags & QLCNIC_MSIX_ENABLED)
424 pci_disable_msix(adapter->pdev);
425 if (adapter->flags & QLCNIC_MSI_ENABLED)
426 pci_disable_msi(adapter->pdev);
427}
428
429static void
430qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
431{
432 if (adapter->ahw.pci_base0 != NULL)
433 iounmap(adapter->ahw.pci_base0);
434}
435
346fe763
RB
436static int
437qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
438{
e88db3bd 439 struct qlcnic_pci_info *pci_info;
900853a4 440 int i, ret = 0;
346fe763
RB
441 u8 pfn;
442
e88db3bd
DC
443 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
444 if (!pci_info)
445 return -ENOMEM;
446
ca315ac2 447 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 448 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 449 if (!adapter->npars) {
900853a4 450 ret = -ENOMEM;
e88db3bd
DC
451 goto err_pci_info;
452 }
346fe763 453
ca315ac2 454 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
455 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
456 if (!adapter->eswitch) {
900853a4 457 ret = -ENOMEM;
ca315ac2 458 goto err_npars;
346fe763
RB
459 }
460
461 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
462 if (ret)
463 goto err_eswitch;
346fe763 464
ca315ac2
DC
465 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
466 pfn = pci_info[i].id;
467 if (pfn > QLCNIC_MAX_PCI_FUNC)
468 return QL_STATUS_INVALID_PARAM;
a1c0c459
SC
469 adapter->npars[pfn].active = (u8)pci_info[i].active;
470 adapter->npars[pfn].type = (u8)pci_info[i].type;
471 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
472 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
473 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
474 }
475
ca315ac2
DC
476 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
477 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
478
e88db3bd 479 kfree(pci_info);
ca315ac2
DC
480 return 0;
481
482err_eswitch:
346fe763
RB
483 kfree(adapter->eswitch);
484 adapter->eswitch = NULL;
ca315ac2 485err_npars:
346fe763 486 kfree(adapter->npars);
ca315ac2 487 adapter->npars = NULL;
e88db3bd
DC
488err_pci_info:
489 kfree(pci_info);
346fe763
RB
490
491 return ret;
492}
493
2e9d722d
AC
494static int
495qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
496{
497 u8 id;
498 u32 ref_count;
499 int i, ret = 1;
500 u32 data = QLCNIC_MGMT_FUNC;
501 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
502
503 /* If other drivers are not in use set their privilege level */
31018e06 504 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
505 ret = qlcnic_api_lock(adapter);
506 if (ret)
507 goto err_lock;
2e9d722d 508
0e33c664
AC
509 if (qlcnic_config_npars) {
510 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 511 id = i;
0e33c664
AC
512 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
513 id == adapter->ahw.pci_func)
514 continue;
515 data |= (qlcnic_config_npars &
516 QLC_DEV_SET_DRV(0xf, id));
517 }
518 } else {
519 data = readl(priv_op);
520 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
521 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
522 adapter->ahw.pci_func));
2e9d722d
AC
523 }
524 writel(data, priv_op);
2e9d722d
AC
525 qlcnic_api_unlock(adapter);
526err_lock:
527 return ret;
528}
529
0866d96d
AC
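/*
 * Read the firmware HAL version, derive this function's PCI function
 * number from its MSI-X table offset, and look up its privilege level
 * in the driver op-mode CRB.  Non-privileged (VF-like) functions are
 * bound to the restricted qlcnic_vf_ops template.
 */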
530static void
531qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
532{
533 void __iomem *msix_base_addr;
534 void __iomem *priv_op;
535 u32 func;
536 u32 msix_base;
537 u32 op_mode, priv_level;
538
539 /* Determine FW API version */
540 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
2e9d722d
AC
541
542 /* Find PCI function number */
543 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
544 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
545 msix_base = readl(msix_base_addr);
546 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
547 adapter->ahw.pci_func = func;
548
549 /* Determine function privilege level */
550 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
551 op_mode = readl(priv_op);
0e33c664 552 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 553 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 554 else
2e9d722d
AC
555 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
556
0866d96d 557 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
558 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
559 dev_info(&adapter->pdev->dev,
560 "HAL Version: %d Non Privileged function\n",
561 adapter->fw_hal_version);
562 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
563 } else
564 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
565}
566
af19b491
AKS
567static int
568qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
569{
570 void __iomem *mem_ptr0 = NULL;
571 resource_size_t mem_base;
572 unsigned long mem_len, pci_len0 = 0;
573
574 struct pci_dev *pdev = adapter->pdev;
af19b491 575
af19b491
AKS
576 /* remap phys address */
577 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
578 mem_len = pci_resource_len(pdev, 0);
579
580 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
581
582 mem_ptr0 = pci_ioremap_bar(pdev, 0);
583 if (mem_ptr0 == NULL) {
584 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
585 return -EIO;
586 }
587 pci_len0 = mem_len;
588 } else {
589 return -EIO;
590 }
591
592 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
593
594 adapter->ahw.pci_base0 = mem_ptr0;
595 adapter->ahw.pci_len0 = pci_len0;
596
0866d96d 597 qlcnic_check_vf(adapter);
2e9d722d 598
af19b491 599 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
2e9d722d 600 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
af19b491
AKS
601
602 return 0;
603}
604
605static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
606{
607 struct pci_dev *pdev = adapter->pdev;
608 int i, found = 0;
609
610 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
611 if (qlcnic_boards[i].vendor == pdev->vendor &&
612 qlcnic_boards[i].device == pdev->device &&
613 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
614 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
 615		sprintf(name, "%pM: %s",
616 adapter->mac_addr,
617 qlcnic_boards[i].short_name);
af19b491
AKS
618 found = 1;
619 break;
620 }
621
622 }
623
624 if (!found)
7f9a0c34 625 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
626}
627
628static void
629qlcnic_check_options(struct qlcnic_adapter *adapter)
630{
631 u32 fw_major, fw_minor, fw_build;
af19b491 632 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
633
634 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
635 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
636 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
637
638 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
639
251a84c9
AKS
640 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
641 fw_major, fw_minor, fw_build);
af19b491 642 if (adapter->ahw.port_type == QLCNIC_XGBE) {
90d19005
SC
643 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
644 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
645 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
646 } else {
647 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
648 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
649 }
650
af19b491 651 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
652 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
653
af19b491
AKS
654 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
655 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
656 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
657 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
658 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
659 }
660
661 adapter->msix_supported = !!use_msi_x;
662 adapter->rss_supported = !!use_msi_x;
663
664 adapter->num_txd = MAX_CMD_DESCRIPTORS;
665
251b036a 666 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
667}
668
174240a8
RB
669static int
670qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
671{
672 int err;
673 struct qlcnic_info nic_info;
674
675 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
676 if (err)
677 return err;
678
a1c0c459 679 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
680 adapter->switch_mode = nic_info.switch_mode;
681 adapter->max_tx_ques = nic_info.max_tx_ques;
682 adapter->max_rx_ques = nic_info.max_rx_ques;
683 adapter->capabilities = nic_info.capabilities;
684 adapter->max_mac_filters = nic_info.max_mac_filters;
685 adapter->max_mtu = nic_info.max_mtu;
686
687 if (adapter->capabilities & BIT_6)
688 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
689 else
690 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
691
692 return err;
693}
694
8cf61f89
AKS
695static void
696qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
697 struct qlcnic_esw_func_cfg *esw_cfg)
698{
699 if (esw_cfg->discard_tagged)
700 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
701 else
702 adapter->flags |= QLCNIC_TAGGING_ENABLED;
703
704 if (esw_cfg->vlan_id)
705 adapter->pvid = esw_cfg->vlan_id;
706 else
707 adapter->pvid = 0;
708}
709
0325d69b
RB
710static void
711qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
712 struct qlcnic_esw_func_cfg *esw_cfg)
713{
ee07c1a7
RB
714 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
715 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
716
717 if (esw_cfg->mac_anti_spoof)
718 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 719
7373373d
RB
720 if (!esw_cfg->mac_override)
721 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
722
ee07c1a7
RB
723 if (!esw_cfg->promisc_mode)
724 adapter->flags |= QLCNIC_PROMISC_DISABLED;
725
0325d69b
RB
726 qlcnic_set_netdev_features(adapter, esw_cfg);
727}
728
729static int
730qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
731{
732 struct qlcnic_esw_func_cfg esw_cfg;
733
734 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
735 return 0;
736
737 esw_cfg.pci_func = adapter->ahw.pci_func;
738 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
739 return -EIO;
8cf61f89 740 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
741 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
742
743 return 0;
744}
745
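/*
 * Apply the per-function eSwitch offload policy to the netdev: BIT_0 of
 * offload_flags gates the checksum/SG/GRO (and LRO/TSO capable) feature
 * set, while BIT_1 and BIT_2 individually allow TSO and TSO6.
 */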
746static void
747qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
748 struct qlcnic_esw_func_cfg *esw_cfg)
749{
750 struct net_device *netdev = adapter->netdev;
751 unsigned long features, vlan_features;
752
753 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
754 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
755 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
756 NETIF_F_IPV6_CSUM);
757
758 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
759 features |= (NETIF_F_TSO | NETIF_F_TSO6);
760 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
761 }
762 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
763 features |= NETIF_F_LRO;
764
765 if (esw_cfg->offload_flags & BIT_0) {
766 netdev->features |= features;
767 adapter->rx_csum = 1;
768 if (!(esw_cfg->offload_flags & BIT_1))
769 netdev->features &= ~NETIF_F_TSO;
770 if (!(esw_cfg->offload_flags & BIT_2))
771 netdev->features &= ~NETIF_F_TSO6;
772 } else {
773 netdev->features &= ~features;
774 adapter->rx_csum = 0;
775 }
776
777 netdev->vlan_features = (features & vlan_features);
778}
779
0866d96d
AC
780static int
781qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
782{
783 void __iomem *priv_op;
784 u32 op_mode, priv_level;
785 int err = 0;
786
174240a8
RB
787 err = qlcnic_initialize_nic(adapter);
788 if (err)
789 return err;
790
0866d96d
AC
791 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
792 return 0;
793
794 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
795 op_mode = readl(priv_op);
796 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
797
798 if (op_mode == QLC_DEV_DRV_DEFAULT)
799 priv_level = QLCNIC_MGMT_FUNC;
800 else
801 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
802
174240a8 803 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
804 if (priv_level == QLCNIC_MGMT_FUNC) {
805 adapter->op_mode = QLCNIC_MGMT_FUNC;
806 err = qlcnic_init_pci_info(adapter);
807 if (err)
808 return err;
809 /* Set privilege level for other functions */
810 qlcnic_set_function_modes(adapter);
811 dev_info(&adapter->pdev->dev,
812 "HAL Version: %d, Management function\n",
813 adapter->fw_hal_version);
814 } else if (priv_level == QLCNIC_PRIV_FUNC) {
815 adapter->op_mode = QLCNIC_PRIV_FUNC;
816 dev_info(&adapter->pdev->dev,
817 "HAL Version: %d, Privileged function\n",
818 adapter->fw_hal_version);
819 }
174240a8 820 }
0866d96d
AC
821
822 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
823
824 return err;
825}
826
0325d69b
RB
827static int
828qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
829{
830 struct qlcnic_esw_func_cfg esw_cfg;
831 struct qlcnic_npar_info *npar;
832 u8 i;
833
174240a8 834 if (adapter->need_fw_reset)
0325d69b
RB
835 return 0;
836
837 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
838 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
839 continue;
840 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
841 esw_cfg.pci_func = i;
842 esw_cfg.offload_flags = BIT_0;
7373373d 843 esw_cfg.mac_override = BIT_0;
ee07c1a7 844 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
845 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
846 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
847 if (qlcnic_config_switch_port(adapter, &esw_cfg))
848 return -EIO;
849 npar = &adapter->npars[i];
850 npar->pvid = esw_cfg.vlan_id;
7373373d 851 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
852 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
853 npar->discard_tagged = esw_cfg.discard_tagged;
854 npar->promisc_mode = esw_cfg.promisc_mode;
855 npar->offload_flags = esw_cfg.offload_flags;
856 }
857
858 return 0;
859}
860
4e8acb01
RB
861static int
862qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
863 struct qlcnic_npar_info *npar, int pci_func)
864{
865 struct qlcnic_esw_func_cfg esw_cfg;
866 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
867 esw_cfg.pci_func = pci_func;
868 esw_cfg.vlan_id = npar->pvid;
7373373d 869 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
870 esw_cfg.discard_tagged = npar->discard_tagged;
871 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
872 esw_cfg.offload_flags = npar->offload_flags;
873 esw_cfg.promisc_mode = npar->promisc_mode;
874 if (qlcnic_config_switch_port(adapter, &esw_cfg))
875 return -EIO;
876
877 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
878 if (qlcnic_config_switch_port(adapter, &esw_cfg))
879 return -EIO;
880
881 return 0;
882}
883
cea8975e
AC
884static int
885qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
886{
4e8acb01 887 int i, err;
cea8975e
AC
888 struct qlcnic_npar_info *npar;
889 struct qlcnic_info nic_info;
890
174240a8 891 if (!adapter->need_fw_reset)
cea8975e
AC
892 return 0;
893
4e8acb01
RB
894 /* Set the NPAR config data after FW reset */
895 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
896 npar = &adapter->npars[i];
897 if (npar->type != QLCNIC_TYPE_NIC)
898 continue;
899 err = qlcnic_get_nic_info(adapter, &nic_info, i);
900 if (err)
901 return err;
902 nic_info.min_tx_bw = npar->min_bw;
903 nic_info.max_tx_bw = npar->max_bw;
904 err = qlcnic_set_nic_info(adapter, &nic_info);
905 if (err)
906 return err;
cea8975e 907
4e8acb01
RB
908 if (npar->enable_pm) {
909 err = qlcnic_config_port_mirroring(adapter,
910 npar->dest_npar, 1, i);
911 if (err)
912 return err;
cea8975e 913 }
4e8acb01
RB
914 err = qlcnic_reset_eswitch_config(adapter, npar, i);
915 if (err)
916 return err;
cea8975e 917 }
4e8acb01 918 return 0;
cea8975e
AC
919}
920
78f84e1a
AKS
921static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
922{
923 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
924 u32 npar_state;
925
926 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
927 return 0;
928
929 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
930 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
931 msleep(1000);
932 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
933 }
934 if (!npar_opt_timeo) {
935 dev_err(&adapter->pdev->dev,
 936			"Waiting for NPAR state to become operational timed out\n");
937 return -EIO;
938 }
939 return 0;
940}
941
174240a8
RB
942static int
943qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
944{
945 int err;
946
947 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
948 adapter->op_mode != QLCNIC_MGMT_FUNC)
949 return 0;
950
951 err = qlcnic_set_default_offload_settings(adapter);
952 if (err)
953 return err;
954
955 err = qlcnic_reset_npar_config(adapter);
956 if (err)
957 return err;
958
959 qlcnic_dev_set_npar_ready(adapter);
960
961 return err;
962}
963
af19b491
AKS
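/*
 * Firmware bring-up: when qlcnic_can_start_firmware() reports that this
 * function should load firmware, fetch it from flash (or from the
 * filesystem with load_fw_file), reset and load it if required, then
 * mark the device READY and apply eSwitch/NPAR management settings.
 * Otherwise the firmware is assumed to be up already and only its state
 * is verified.
 */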
964static int
965qlcnic_start_firmware(struct qlcnic_adapter *adapter)
966{
d4066833 967 int err;
af19b491 968
aa5e18c0
SC
969 err = qlcnic_can_start_firmware(adapter);
970 if (err < 0)
971 return err;
972 else if (!err)
d4066833 973 goto check_fw_status;
af19b491 974
4d5bdb38
AKS
975 if (load_fw_file)
976 qlcnic_request_firmware(adapter);
8f891387 977 else {
8cfdce08
SC
978 err = qlcnic_check_flash_fw_ver(adapter);
979 if (err)
8f891387 980 goto err_out;
981
4d5bdb38 982 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 983 }
af19b491
AKS
984
985 err = qlcnic_need_fw_reset(adapter);
af19b491 986 if (err == 0)
4e70812b 987 goto check_fw_status;
af19b491 988
d4066833
SC
989 err = qlcnic_pinit_from_rom(adapter);
990 if (err)
991 goto err_out;
af19b491
AKS
992
993 err = qlcnic_load_firmware(adapter);
994 if (err)
995 goto err_out;
996
997 qlcnic_release_firmware(adapter);
d4066833 998 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 999
d4066833
SC
1000check_fw_status:
1001 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1002 if (err)
1003 goto err_out;
1004
1005 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1006 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1007
0866d96d
AC
1008 err = qlcnic_check_eswitch_mode(adapter);
1009 if (err) {
1010 dev_err(&adapter->pdev->dev,
1011 "Memory allocation failed for eswitch\n");
1012 goto err_out;
1013 }
174240a8
RB
1014 err = qlcnic_set_mgmt_operations(adapter);
1015 if (err)
1016 goto err_out;
1017
1018 qlcnic_check_options(adapter);
af19b491
AKS
1019 adapter->need_fw_reset = 0;
1020
a7fc948f
AKS
1021 qlcnic_release_firmware(adapter);
1022 return 0;
af19b491
AKS
1023
1024err_out:
a7fc948f
AKS
1025 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1026 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1027
af19b491
AKS
1028 qlcnic_release_firmware(adapter);
1029 return err;
1030}
1031
1032static int
1033qlcnic_request_irq(struct qlcnic_adapter *adapter)
1034{
1035 irq_handler_t handler;
1036 struct qlcnic_host_sds_ring *sds_ring;
1037 int err, ring;
1038
1039 unsigned long flags = 0;
1040 struct net_device *netdev = adapter->netdev;
1041 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1042
7eb9855d
AKS
1043 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1044 handler = qlcnic_tmp_intr;
1045 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1046 flags |= IRQF_SHARED;
1047
1048 } else {
1049 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1050 handler = qlcnic_msix_intr;
1051 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1052 handler = qlcnic_msi_intr;
1053 else {
1054 flags |= IRQF_SHARED;
1055 handler = qlcnic_intr;
1056 }
af19b491
AKS
1057 }
1058 adapter->irq = netdev->irq;
1059
1060 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1061 sds_ring = &recv_ctx->sds_rings[ring];
1062 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1063 err = request_irq(sds_ring->irq, handler,
1064 flags, sds_ring->name, sds_ring);
1065 if (err)
1066 return err;
1067 }
1068
1069 return 0;
1070}
1071
1072static void
1073qlcnic_free_irq(struct qlcnic_adapter *adapter)
1074{
1075 int ring;
1076 struct qlcnic_host_sds_ring *sds_ring;
1077
1078 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1079
1080 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1081 sds_ring = &recv_ctx->sds_rings[ring];
1082 free_irq(sds_ring->irq, sds_ring);
1083 }
1084}
1085
1086static void
1087qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1088{
1089 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1090 adapter->coal.normal.data.rx_time_us =
1091 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1092 adapter->coal.normal.data.rx_packets =
1093 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1094 adapter->coal.normal.data.tx_time_us =
1095 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1096 adapter->coal.normal.data.tx_packets =
1097 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1098}
1099
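/*
 * Bring the data path up: create the firmware Tx/Rx context, post Rx
 * buffers, program the multicast list, MTU, RSS and interrupt
 * coalescing, enable NAPI and request a link event before marking
 * __QLCNIC_DEV_UP.
 */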
1100static int
1101__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1102{
8a15ad1f
AKS
1103 int ring;
1104 struct qlcnic_host_rds_ring *rds_ring;
1105
af19b491
AKS
1106 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1107 return -EIO;
1108
8a15ad1f
AKS
1109 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1110 return 0;
0325d69b
RB
1111 if (qlcnic_set_eswitch_port_config(adapter))
1112 return -EIO;
8a15ad1f
AKS
1113
1114 if (qlcnic_fw_create_ctx(adapter))
1115 return -EIO;
1116
1117 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1118 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1119 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1120 }
1121
af19b491
AKS
1122 qlcnic_set_multi(netdev);
1123 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1124
1125 adapter->ahw.linkup = 0;
1126
1127 if (adapter->max_sds_rings > 1)
1128 qlcnic_config_rss(adapter, 1);
1129
1130 qlcnic_config_intr_coalesce(adapter);
1131
24763d80 1132 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1133 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1134
1135 qlcnic_napi_enable(adapter);
1136
1137 qlcnic_linkevent_request(adapter, 1);
1138
68bf1c68 1139 adapter->reset_context = 0;
af19b491
AKS
1140 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1141 return 0;
1142}
1143
 1144/* Used during resume and the firmware recovery path. */
1145
1146static int
1147qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1148{
1149 int err = 0;
1150
1151 rtnl_lock();
1152 if (netif_running(netdev))
1153 err = __qlcnic_up(adapter, netdev);
1154 rtnl_unlock();
1155
1156 return err;
1157}
1158
1159static void
1160__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1161{
1162 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1163 return;
1164
1165 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1166 return;
1167
1168 smp_mb();
1169 spin_lock(&adapter->tx_clean_lock);
1170 netif_carrier_off(netdev);
1171 netif_tx_disable(netdev);
1172
1173 qlcnic_free_mac_list(adapter);
1174
b5e5492c
AKS
1175 if (adapter->fhash.fnum)
1176 qlcnic_delete_lb_filters(adapter);
1177
af19b491
AKS
1178 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1179
1180 qlcnic_napi_disable(adapter);
1181
8a15ad1f
AKS
1182 qlcnic_fw_destroy_ctx(adapter);
1183
1184 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1185 qlcnic_release_tx_buffers(adapter);
1186 spin_unlock(&adapter->tx_clean_lock);
1187}
1188
 1189/* Used during suspend and the firmware recovery path. */
1190
1191static void
1192qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1193{
1194 rtnl_lock();
1195 if (netif_running(netdev))
1196 __qlcnic_down(adapter, netdev);
1197 rtnl_unlock();
1198
1199}
1200
1201static int
1202qlcnic_attach(struct qlcnic_adapter *adapter)
1203{
1204 struct net_device *netdev = adapter->netdev;
1205 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1206 int err;
af19b491
AKS
1207
1208 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1209 return 0;
1210
af19b491
AKS
1211 err = qlcnic_napi_add(adapter, netdev);
1212 if (err)
1213 return err;
1214
1215 err = qlcnic_alloc_sw_resources(adapter);
1216 if (err) {
1217 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1218 goto err_out_napi_del;
af19b491
AKS
1219 }
1220
1221 err = qlcnic_alloc_hw_resources(adapter);
1222 if (err) {
1223 dev_err(&pdev->dev, "Error in setting hw resources\n");
1224 goto err_out_free_sw;
1225 }
1226
af19b491
AKS
1227 err = qlcnic_request_irq(adapter);
1228 if (err) {
1229 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1230 goto err_out_free_hw;
af19b491
AKS
1231 }
1232
1233 qlcnic_init_coalesce_defaults(adapter);
1234
1235 qlcnic_create_sysfs_entries(adapter);
1236
1237 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1238 return 0;
1239
8a15ad1f 1240err_out_free_hw:
af19b491
AKS
1241 qlcnic_free_hw_resources(adapter);
1242err_out_free_sw:
1243 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1244err_out_napi_del:
1245 qlcnic_napi_del(adapter);
af19b491
AKS
1246 return err;
1247}
1248
1249static void
1250qlcnic_detach(struct qlcnic_adapter *adapter)
1251{
1252 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1253 return;
1254
1255 qlcnic_remove_sysfs_entries(adapter);
1256
1257 qlcnic_free_hw_resources(adapter);
1258 qlcnic_release_rx_buffers(adapter);
1259 qlcnic_free_irq(adapter);
1260 qlcnic_napi_del(adapter);
1261 qlcnic_free_sw_resources(adapter);
1262
1263 adapter->is_up = 0;
1264}
1265
7eb9855d
AKS
1266void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1267{
1268 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1269 struct qlcnic_host_sds_ring *sds_ring;
1270 int ring;
1271
78ad3892 1272 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1273 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1274 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1275 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1276 qlcnic_disable_int(sds_ring);
1277 }
7eb9855d
AKS
1278 }
1279
8a15ad1f
AKS
1280 qlcnic_fw_destroy_ctx(adapter);
1281
7eb9855d
AKS
1282 qlcnic_detach(adapter);
1283
1284 adapter->diag_test = 0;
1285 adapter->max_sds_rings = max_sds_rings;
1286
1287 if (qlcnic_attach(adapter))
34ce3626 1288 goto out;
7eb9855d
AKS
1289
1290 if (netif_running(netdev))
1291 __qlcnic_up(adapter, netdev);
34ce3626 1292out:
7eb9855d
AKS
1293 netif_device_attach(netdev);
1294}
1295
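/*
 * Switch into diagnostic mode for self-tests: tear down the normal data
 * path, re-attach with a single SDS ring, and (for the interrupt test)
 * unmask interrupts on the diagnostic context.  qlcnic_diag_free_res()
 * restores the original ring configuration.
 */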
1296int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1297{
1298 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1299 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1300 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1301 int ring;
1302 int ret;
1303
1304 netif_device_detach(netdev);
1305
1306 if (netif_running(netdev))
1307 __qlcnic_down(adapter, netdev);
1308
1309 qlcnic_detach(adapter);
1310
1311 adapter->max_sds_rings = 1;
1312 adapter->diag_test = test;
1313
1314 ret = qlcnic_attach(adapter);
34ce3626
AKS
1315 if (ret) {
1316 netif_device_attach(netdev);
7eb9855d 1317 return ret;
34ce3626 1318 }
7eb9855d 1319
8a15ad1f
AKS
1320 ret = qlcnic_fw_create_ctx(adapter);
1321 if (ret) {
1322 qlcnic_detach(adapter);
57e46248 1323 netif_device_attach(netdev);
8a15ad1f
AKS
1324 return ret;
1325 }
1326
1327 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1328 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1329 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1330 }
1331
cdaff185
AKS
1332 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1333 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1334 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1335 qlcnic_enable_int(sds_ring);
1336 }
7eb9855d 1337 }
78ad3892 1338 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1339
1340 return 0;
1341}
1342
68bf1c68
AKS
1343/* Reset context in hardware only */
1344static int
1345qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1346{
1347 struct net_device *netdev = adapter->netdev;
1348
1349 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1350 return -EBUSY;
1351
1352 netif_device_detach(netdev);
1353
1354 qlcnic_down(adapter, netdev);
1355
1356 qlcnic_up(adapter, netdev);
1357
1358 netif_device_attach(netdev);
1359
1360 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1361 return 0;
1362}
1363
af19b491
AKS
1364int
1365qlcnic_reset_context(struct qlcnic_adapter *adapter)
1366{
1367 int err = 0;
1368 struct net_device *netdev = adapter->netdev;
1369
1370 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1371 return -EBUSY;
1372
1373 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1374
1375 netif_device_detach(netdev);
1376
1377 if (netif_running(netdev))
1378 __qlcnic_down(adapter, netdev);
1379
1380 qlcnic_detach(adapter);
1381
1382 if (netif_running(netdev)) {
1383 err = qlcnic_attach(adapter);
1384 if (!err)
34ce3626 1385 __qlcnic_up(adapter, netdev);
af19b491
AKS
1386 }
1387
1388 netif_device_attach(netdev);
1389 }
1390
af19b491
AKS
1391 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1392 return err;
1393}
1394
1395static int
1396qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1397 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1398{
1399 int err;
1400 struct pci_dev *pdev = adapter->pdev;
1401
1402 adapter->rx_csum = 1;
1403 adapter->mc_enabled = 0;
1404 adapter->max_mc_count = 38;
1405
1406 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1407 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1408
1409 qlcnic_change_mtu(netdev, netdev->mtu);
1410
1411 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1412
2e9d722d 1413 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
d5790663 1414 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
2e9d722d 1415 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
ac8d0c4f
AC
1416 NETIF_F_IPV6_CSUM);
1417
1418 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1419 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1420 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1421 }
af19b491 1422
1bb09fb9 1423 if (pci_using_dac) {
af19b491
AKS
1424 netdev->features |= NETIF_F_HIGHDMA;
1425 netdev->vlan_features |= NETIF_F_HIGHDMA;
1426 }
1427
1428 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1429 netdev->features |= (NETIF_F_HW_VLAN_TX);
1430
1431 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1432 netdev->features |= NETIF_F_LRO;
af19b491
AKS
1433 netdev->irq = adapter->msix_entries[0].vector;
1434
af19b491 1435 netif_carrier_off(netdev);
af19b491
AKS
1436
1437 err = register_netdev(netdev);
1438 if (err) {
1439 dev_err(&pdev->dev, "failed to register net device\n");
1440 return err;
1441 }
1442
1443 return 0;
1444}
1445
1bb09fb9
AKS
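/*
 * Prefer a 64-bit DMA mask so buffers may sit anywhere in memory
 * (pci_using_dac = 1, which later also sets NETIF_F_HIGHDMA); fall back
 * to a 32-bit mask when the device/platform cannot do 64-bit DMA.
 */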
1446static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1447{
1448 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1449 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1450 *pci_using_dac = 1;
1451 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1452 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1453 *pci_using_dac = 0;
1454 else {
1455 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1456 return -EIO;
1457 }
1458
1459 return 0;
1460}
1461
af19b491
AKS
1462static int __devinit
1463qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1464{
1465 struct net_device *netdev = NULL;
1466 struct qlcnic_adapter *adapter = NULL;
1467 int err;
af19b491 1468 uint8_t revision_id;
1bb09fb9 1469 uint8_t pci_using_dac;
da48e6c3 1470 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1471
1472 err = pci_enable_device(pdev);
1473 if (err)
1474 return err;
1475
1476 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1477 err = -ENODEV;
1478 goto err_out_disable_pdev;
1479 }
1480
1bb09fb9
AKS
1481 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1482 if (err)
1483 goto err_out_disable_pdev;
1484
af19b491
AKS
1485 err = pci_request_regions(pdev, qlcnic_driver_name);
1486 if (err)
1487 goto err_out_disable_pdev;
1488
1489 pci_set_master(pdev);
451724c8 1490 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1491
1492 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1493 if (!netdev) {
1494 dev_err(&pdev->dev, "failed to allocate net_device\n");
1495 err = -ENOMEM;
1496 goto err_out_free_res;
1497 }
1498
1499 SET_NETDEV_DEV(netdev, &pdev->dev);
1500
1501 adapter = netdev_priv(netdev);
1502 adapter->netdev = netdev;
1503 adapter->pdev = pdev;
6df900e9 1504 adapter->dev_rst_time = jiffies;
af19b491
AKS
1505
1506 revision_id = pdev->revision;
1507 adapter->ahw.revision_id = revision_id;
1508
1509 rwlock_init(&adapter->ahw.crb_lock);
1510 mutex_init(&adapter->ahw.mem_lock);
1511
1512 spin_lock_init(&adapter->tx_clean_lock);
1513 INIT_LIST_HEAD(&adapter->mac_list);
1514
1515 err = qlcnic_setup_pci_map(adapter);
1516 if (err)
1517 goto err_out_free_netdev;
1518
1519 /* This will be reset for mezz cards */
2e9d722d 1520 adapter->portnum = adapter->ahw.pci_func;
af19b491
AKS
1521
1522 err = qlcnic_get_board_info(adapter);
1523 if (err) {
1524 dev_err(&pdev->dev, "Error getting board config info.\n");
1525 goto err_out_iounmap;
1526 }
1527
8cfdce08
SC
1528 err = qlcnic_setup_idc_param(adapter);
1529 if (err)
b3a24649 1530 goto err_out_iounmap;
af19b491 1531
1dc0f3c5 1532 adapter->flags |= QLCNIC_NEED_FLR;
b0044bcf 1533
9f26f547 1534 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1535 if (err) {
 1536		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
af19b491 1537 goto err_out_decr_ref;
a7fc948f 1538 }
af19b491 1539
da48e6c3
RB
1540 if (qlcnic_read_mac_addr(adapter))
1541 dev_warn(&pdev->dev, "failed to read mac addr\n");
1542
1543 if (adapter->portnum == 0) {
1544 get_brd_name(adapter, brd_name);
1545
1546 pr_info("%s: %s Board Chip rev 0x%x\n",
1547 module_name(THIS_MODULE),
1548 brd_name, adapter->ahw.revision_id);
1549 }
1550
af19b491
AKS
1551 qlcnic_clear_stats(adapter);
1552
1553 qlcnic_setup_intr(adapter);
1554
1bb09fb9 1555 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1556 if (err)
1557 goto err_out_disable_msi;
1558
1559 pci_set_drvdata(pdev, adapter);
1560
1561 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1562
1563 switch (adapter->ahw.port_type) {
1564 case QLCNIC_GBE:
1565 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1566 adapter->netdev->name);
1567 break;
1568 case QLCNIC_XGBE:
1569 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1570 adapter->netdev->name);
1571 break;
1572 }
1573
b5e5492c 1574 qlcnic_alloc_lb_filters_mem(adapter);
af19b491
AKS
1575 qlcnic_create_diag_entries(adapter);
1576
1577 return 0;
1578
1579err_out_disable_msi:
1580 qlcnic_teardown_intr(adapter);
1581
1582err_out_decr_ref:
21854f02 1583 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1584
1585err_out_iounmap:
1586 qlcnic_cleanup_pci_map(adapter);
1587
1588err_out_free_netdev:
1589 free_netdev(netdev);
1590
1591err_out_free_res:
1592 pci_release_regions(pdev);
1593
1594err_out_disable_pdev:
1595 pci_set_drvdata(pdev, NULL);
1596 pci_disable_device(pdev);
1597 return err;
1598}
1599
1600static void __devexit qlcnic_remove(struct pci_dev *pdev)
1601{
1602 struct qlcnic_adapter *adapter;
1603 struct net_device *netdev;
1604
1605 adapter = pci_get_drvdata(pdev);
1606 if (adapter == NULL)
1607 return;
1608
1609 netdev = adapter->netdev;
1610
1611 qlcnic_cancel_fw_work(adapter);
1612
1613 unregister_netdev(netdev);
1614
af19b491
AKS
1615 qlcnic_detach(adapter);
1616
2e9d722d
AC
1617 if (adapter->npars != NULL)
1618 kfree(adapter->npars);
1619 if (adapter->eswitch != NULL)
1620 kfree(adapter->eswitch);
1621
21854f02 1622 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1623
1624 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1625
b5e5492c
AKS
1626 qlcnic_free_lb_filters_mem(adapter);
1627
af19b491
AKS
1628 qlcnic_teardown_intr(adapter);
1629
1630 qlcnic_remove_diag_entries(adapter);
1631
1632 qlcnic_cleanup_pci_map(adapter);
1633
1634 qlcnic_release_firmware(adapter);
1635
451724c8 1636 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1637 pci_release_regions(pdev);
1638 pci_disable_device(pdev);
1639 pci_set_drvdata(pdev, NULL);
1640
1641 free_netdev(netdev);
1642}
1643static int __qlcnic_shutdown(struct pci_dev *pdev)
1644{
1645 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1646 struct net_device *netdev = adapter->netdev;
1647 int retval;
1648
1649 netif_device_detach(netdev);
1650
1651 qlcnic_cancel_fw_work(adapter);
1652
1653 if (netif_running(netdev))
1654 qlcnic_down(adapter, netdev);
1655
21854f02 1656 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1657
1658 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1659
1660 retval = pci_save_state(pdev);
1661 if (retval)
1662 return retval;
1663
1664 if (qlcnic_wol_supported(adapter)) {
1665 pci_enable_wake(pdev, PCI_D3cold, 1);
1666 pci_enable_wake(pdev, PCI_D3hot, 1);
1667 }
1668
1669 return 0;
1670}
1671
1672static void qlcnic_shutdown(struct pci_dev *pdev)
1673{
1674 if (__qlcnic_shutdown(pdev))
1675 return;
1676
1677 pci_disable_device(pdev);
1678}
1679
1680#ifdef CONFIG_PM
1681static int
1682qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1683{
1684 int retval;
1685
1686 retval = __qlcnic_shutdown(pdev);
1687 if (retval)
1688 return retval;
1689
1690 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1691 return 0;
1692}
1693
1694static int
1695qlcnic_resume(struct pci_dev *pdev)
1696{
1697 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1698 struct net_device *netdev = adapter->netdev;
1699 int err;
1700
1701 err = pci_enable_device(pdev);
1702 if (err)
1703 return err;
1704
1705 pci_set_power_state(pdev, PCI_D0);
1706 pci_set_master(pdev);
1707 pci_restore_state(pdev);
1708
9f26f547 1709 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1710 if (err) {
1711 dev_err(&pdev->dev, "failed to start firmware\n");
1712 return err;
1713 }
1714
1715 if (netif_running(netdev)) {
af19b491
AKS
1716 err = qlcnic_up(adapter, netdev);
1717 if (err)
52486a3a 1718 goto done;
af19b491 1719
aec1e845 1720 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1721 }
52486a3a 1722done:
af19b491
AKS
1723 netif_device_attach(netdev);
1724 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1725 return 0;
af19b491
AKS
1726}
1727#endif
1728
1729static int qlcnic_open(struct net_device *netdev)
1730{
1731 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1732 int err;
1733
af19b491
AKS
1734 err = qlcnic_attach(adapter);
1735 if (err)
1736 return err;
1737
1738 err = __qlcnic_up(adapter, netdev);
1739 if (err)
1740 goto err_out;
1741
1742 netif_start_queue(netdev);
1743
1744 return 0;
1745
1746err_out:
1747 qlcnic_detach(adapter);
1748 return err;
1749}
1750
1751/*
1752 * qlcnic_close - Disables a network interface entry point
1753 */
1754static int qlcnic_close(struct net_device *netdev)
1755{
1756 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1757
1758 __qlcnic_down(adapter, netdev);
1759 return 0;
1760}
1761
b5e5492c
AKS
1762static void
1763qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1764{
1765 void *head;
1766 int i;
1767
1768 if (!qlcnic_mac_learn)
1769 return;
1770
1771 spin_lock_init(&adapter->mac_learn_lock);
1772
1773 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1774 GFP_KERNEL);
1775 if (!head)
1776 return;
1777
1778 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1779 adapter->fhash.fhead = (struct hlist_head *)head;
1780
1781 for (i = 0; i < adapter->fhash.fmax; i++)
1782 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1783}
1784
1785static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1786{
1787 if (adapter->fhash.fmax && adapter->fhash.fhead)
1788 kfree(adapter->fhash.fhead);
1789
1790 adapter->fhash.fhead = NULL;
1791 adapter->fhash.fmax = 0;
1792}
1793
1794static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1795 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1796{
1797 struct cmd_desc_type0 *hwdesc;
1798 struct qlcnic_nic_req *req;
1799 struct qlcnic_mac_req *mac_req;
7e56cac4 1800 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1801 u32 producer;
1802 u64 word;
1803
1804 producer = tx_ring->producer;
1805 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1806
1807 req = (struct qlcnic_nic_req *)hwdesc;
1808 memset(req, 0, sizeof(struct qlcnic_nic_req));
1809 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1810
1811 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1812 req->req_hdr = cpu_to_le64(word);
1813
1814 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1815 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1816 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1817
7e56cac4
SC
1818 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1819 vlan_req->vlan_id = vlan_id;
03c5d770 1820
b5e5492c
AKS
1821 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1822}
1823
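/*
 * Source-MAC learning for loopback filtering: hash the sender address
 * (plus the VLAN id on eSwitch-enabled functions) and, if it is not in
 * the filter table yet, program it into firmware with a MAC_ADD or
 * MAC_VLAN_ADD request queued on the Tx ring.  Entries older than
 * QLCNIC_READD_AGE seconds are re-programmed when seen again.
 */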
1824#define QLCNIC_MAC_HASH(MAC)\
1825 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1826
1827static void
1828qlcnic_send_filter(struct qlcnic_adapter *adapter,
1829 struct qlcnic_host_tx_ring *tx_ring,
1830 struct cmd_desc_type0 *first_desc,
1831 struct sk_buff *skb)
1832{
1833 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1834 struct qlcnic_filter *fil, *tmp_fil;
1835 struct hlist_node *tmp_hnode, *n;
1836 struct hlist_head *head;
1837 u64 src_addr = 0;
7e56cac4 1838 __le16 vlan_id = 0;
b5e5492c
AKS
1839 u8 hindex;
1840
1841 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1842 return;
1843
1844 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1845 return;
1846
03c5d770
AKS
 1847	/* Only NPAR capable devices support VLAN based learning */
1848 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1849 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1850 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1851 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1852 head = &(adapter->fhash.fhead[hindex]);
1853
1854 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1855 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1856 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1857
1858 if (jiffies >
1859 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1860 qlcnic_change_filter(adapter, src_addr, vlan_id,
1861 tx_ring);
b5e5492c
AKS
1862 tmp_fil->ftime = jiffies;
1863 return;
1864 }
1865 }
1866
1867 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1868 if (!fil)
1869 return;
1870
03c5d770 1871 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1872
1873 fil->ftime = jiffies;
03c5d770 1874 fil->vlan_id = vlan_id;
b5e5492c
AKS
1875 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1876 spin_lock(&adapter->mac_learn_lock);
1877 hlist_add_head(&(fil->fnode), head);
1878 adapter->fhash.fnum++;
1879 spin_unlock(&adapter->mac_learn_lock);
1880}
1881
af19b491
AKS
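/*
 * Pick the Tx opcode (plain, checksum offload, or LSO/LSO6) from the
 * skb.  For LSO the MAC/IP/TCP headers are copied into additional
 * command descriptors so firmware can replicate them per segment; when
 * an out-of-band VLAN tag is present, a VLAN header template is built
 * first.
 */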
1882static void
1883qlcnic_tso_check(struct net_device *netdev,
1884 struct qlcnic_host_tx_ring *tx_ring,
1885 struct cmd_desc_type0 *first_desc,
1886 struct sk_buff *skb)
1887{
1888 u8 opcode = TX_ETHER_PKT;
1889 __be16 protocol = skb->protocol;
8cf61f89
AKS
1890 u16 flags = 0;
1891 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1892 struct cmd_desc_type0 *hwdesc;
1893 struct vlan_ethhdr *vh;
8bfe8b91 1894 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1895 u32 producer = tx_ring->producer;
7e56cac4
SC
1896 __le16 vlan_oob = first_desc->flags_opcode &
1897 cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1898
2e9d722d
AC
1899 if (*(skb->data) & BIT_0) {
1900 flags |= BIT_0;
1901 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1902 }
1903
af19b491
AKS
1904 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1905 skb_shinfo(skb)->gso_size > 0) {
1906
1907 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1908
1909 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1910 first_desc->total_hdr_length = hdr_len;
1911 if (vlan_oob) {
1912 first_desc->total_hdr_length += VLAN_HLEN;
1913 first_desc->tcp_hdr_offset = VLAN_HLEN;
1914 first_desc->ip_hdr_offset = VLAN_HLEN;
1915 /* Only in case of TSO on vlan device */
1916 flags |= FLAGS_VLAN_TAGGED;
1917 }
1918
1919 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1920 TX_TCP_LSO6 : TX_TCP_LSO;
1921 tso = 1;
1922
1923 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1924 u8 l4proto;
1925
1926 if (protocol == cpu_to_be16(ETH_P_IP)) {
1927 l4proto = ip_hdr(skb)->protocol;
1928
1929 if (l4proto == IPPROTO_TCP)
1930 opcode = TX_TCP_PKT;
1931 else if (l4proto == IPPROTO_UDP)
1932 opcode = TX_UDP_PKT;
1933 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1934 l4proto = ipv6_hdr(skb)->nexthdr;
1935
1936 if (l4proto == IPPROTO_TCP)
1937 opcode = TX_TCPV6_PKT;
1938 else if (l4proto == IPPROTO_UDP)
1939 opcode = TX_UDPV6_PKT;
1940 }
1941 }
1942
1943 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1944 first_desc->ip_hdr_offset += skb_network_offset(skb);
1945 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1946
1947 if (!tso)
1948 return;
1949
1950 /* For LSO, we need to copy the MAC/IP/TCP headers into
1951 * the descriptor ring
1952 */
af19b491
AKS
1953 copied = 0;
1954 offset = 2;
1955
1956 if (vlan_oob) {
1957 /* Create a TSO VLAN header template for the firmware */
1958
1959 hwdesc = &tx_ring->desc_head[producer];
1960 tx_ring->cmd_buf_arr[producer].skb = NULL;
1961
1962 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1963 hdr_len + VLAN_HLEN);
1964
1965 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1966 skb_copy_from_linear_data(skb, vh, 12);
1967 vh->h_vlan_proto = htons(ETH_P_8021Q);
7e56cac4
SC
1968 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1969
af19b491
AKS
1970 skb_copy_from_linear_data_offset(skb, 12,
1971 (char *)vh + 16, copy_len - 16);
1972
1973 copied = copy_len - VLAN_HLEN;
1974 offset = 0;
1975
1976 producer = get_next_index(producer, tx_ring->num_desc);
1977 }
1978
1979 while (copied < hdr_len) {
1980
1981 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1982 (hdr_len - copied));
1983
1984 hwdesc = &tx_ring->desc_head[producer];
1985 tx_ring->cmd_buf_arr[producer].skb = NULL;
1986
1987 skb_copy_from_linear_data_offset(skb, copied,
1988 (char *)hwdesc + offset, copy_len);
1989
1990 copied += copy_len;
1991 offset = 0;
1992
1993 producer = get_next_index(producer, tx_ring->num_desc);
1994 }
1995
1996 tx_ring->producer = producer;
1997 barrier();
8bfe8b91 1998 adapter->stats.lso_frames++;
af19b491
AKS
1999}
2000
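/*
 * DMA-map the skb head and every page fragment, recording the mappings in
 * pbuf->frag_array.  On failure, all mappings made so far are unwound and
 * -ENOMEM is returned.
 */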
2001static int
2002qlcnic_map_tx_skb(struct pci_dev *pdev,
2003 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2004{
2005 struct qlcnic_skb_frag *nf;
2006 struct skb_frag_struct *frag;
2007 int i, nr_frags;
2008 dma_addr_t map;
2009
2010 nr_frags = skb_shinfo(skb)->nr_frags;
2011 nf = &pbuf->frag_array[0];
2012
2013 map = pci_map_single(pdev, skb->data,
2014 skb_headlen(skb), PCI_DMA_TODEVICE);
2015 if (pci_dma_mapping_error(pdev, map))
2016 goto out_err;
2017
2018 nf->dma = map;
2019 nf->length = skb_headlen(skb);
2020
2021 for (i = 0; i < nr_frags; i++) {
2022 frag = &skb_shinfo(skb)->frags[i];
2023 nf = &pbuf->frag_array[i+1];
2024
2025 map = pci_map_page(pdev, frag->page, frag->page_offset,
2026 frag->size, PCI_DMA_TODEVICE);
2027 if (pci_dma_mapping_error(pdev, map))
2028 goto unwind;
2029
2030 nf->dma = map;
2031 nf->length = frag->size;
2032 }
2033
2034 return 0;
2035
2036unwind:
2037 while (--i >= 0) {
2038 nf = &pbuf->frag_array[i+1];
2039 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2040 }
2041
2042 nf = &pbuf->frag_array[0];
2043 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2044
2045out_err:
2046 return -ENOMEM;
2047}
2048
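/*
 * Decide how the frame is VLAN tagged: an inline 802.1Q header sets
 * FLAGS_VLAN_TAGGED, an out-of-band tag from the stack sets FLAGS_VLAN_OOB,
 * and on a PVID-enabled function untagged frames get the port VLAN while
 * tagged frames are rejected unless tagging is explicitly enabled.
 */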
8cf61f89
AKS
2049static int
2050qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2051 struct sk_buff *skb,
2052 struct cmd_desc_type0 *first_desc)
2053{
2054 u8 opcode = 0;
2055 u16 flags = 0;
2056 __be16 protocol = skb->protocol;
2057 struct vlan_ethhdr *vh;
2058
2059 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2060 vh = (struct vlan_ethhdr *)skb->data;
2061 protocol = vh->h_vlan_encapsulated_proto;
2062 flags = FLAGS_VLAN_TAGGED;
2063 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2064 } else if (vlan_tx_tag_present(skb)) {
2065 flags = FLAGS_VLAN_OOB;
2066 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2067 }
2068 if (unlikely(adapter->pvid)) {
2069 if (first_desc->vlan_TCI &&
2070 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2071 return -EIO;
2072 if (first_desc->vlan_TCI &&
2073 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2074 goto set_flags;
2075
2076 flags = FLAGS_VLAN_OOB;
2077 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2078 }
2079set_flags:
2080 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2081 return 0;
2082}
2083
af19b491
AKS
2084static inline void
2085qlcnic_clear_cmddesc(u64 *desc)
2086{
2087 desc[0] = 0ULL;
2088 desc[2] = 0ULL;
8cf61f89 2089 desc[7] = 0ULL;
af19b491
AKS
2090}
2091
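/*
 * Main transmit entry point: pull down excess fragments for non-TSO skbs,
 * stop the queue when descriptors run low, DMA-map the buffers, fill up to
 * four buffer addresses per command descriptor, apply VLAN/TSO/checksum
 * settings, optionally learn the source MAC, and update the producer.
 */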
cdaff185 2092netdev_tx_t
af19b491
AKS
2093qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2094{
2095 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2096 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2097 struct qlcnic_cmd_buffer *pbuf;
2098 struct qlcnic_skb_frag *buffrag;
2099 struct cmd_desc_type0 *hwdesc, *first_desc;
2100 struct pci_dev *pdev;
dcb50aff 2101 struct ethhdr *phdr;
91a403ca 2102 int delta = 0;
af19b491
AKS
2103 int i, k;
2104
2105 u32 producer;
2106 int frag_count, no_of_desc;
2107 u32 num_txd = tx_ring->num_desc;
2108
780ab790
AKS
2109 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2110 netif_stop_queue(netdev);
2111 return NETDEV_TX_BUSY;
2112 }
2113
fe4d434d 2114 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2115 phdr = (struct ethhdr *)skb->data;
2116 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2117 adapter->mac_addr))
2118 goto drop_packet;
2119 }
2120
af19b491 2121 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2122 /* 14 frags are supported for a normal packet and
2123 * 32 frags for a TSO packet
2124 */
2125 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2126
2127 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2128 delta += skb_shinfo(skb)->frags[i].size;
2129
2130 if (!__pskb_pull_tail(skb, delta))
2131 goto drop_packet;
2132
2133 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2134 }
af19b491
AKS
2135
2136 /* 4 fragments per cmd desc */
2137 no_of_desc = (frag_count + 3) >> 2;
2138
ef71ff83 2139 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2140 netif_stop_queue(netdev);
ef71ff83
RB
2141 smp_mb();
2142 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2143 netif_start_queue(netdev);
2144 else {
2145 adapter->stats.xmit_off++;
2146 return NETDEV_TX_BUSY;
2147 }
af19b491
AKS
2148 }
2149
2150 producer = tx_ring->producer;
2151 pbuf = &tx_ring->cmd_buf_arr[producer];
2152
2153 pdev = adapter->pdev;
2154
8cf61f89
AKS
2155 first_desc = hwdesc = &tx_ring->desc_head[producer];
2156 qlcnic_clear_cmddesc((u64 *)hwdesc);
2157
2158 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2159 goto drop_packet;
2160
8ae6df97
AKS
2161 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2162 adapter->stats.tx_dma_map_error++;
af19b491 2163 goto drop_packet;
8ae6df97 2164 }
af19b491
AKS
2165
2166 pbuf->skb = skb;
2167 pbuf->frag_count = frag_count;
2168
af19b491
AKS
2169 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2170 qlcnic_set_tx_port(first_desc, adapter->portnum);
2171
2172 for (i = 0; i < frag_count; i++) {
2173
2174 k = i % 4;
2175
2176 if ((k == 0) && (i > 0)) {
2177 /* move to next desc. */
2178 producer = get_next_index(producer, num_txd);
2179 hwdesc = &tx_ring->desc_head[producer];
2180 qlcnic_clear_cmddesc((u64 *)hwdesc);
2181 tx_ring->cmd_buf_arr[producer].skb = NULL;
2182 }
2183
2184 buffrag = &pbuf->frag_array[i];
2185
2186 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2187 switch (k) {
2188 case 0:
2189 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2190 break;
2191 case 1:
2192 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2193 break;
2194 case 2:
2195 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2196 break;
2197 case 3:
2198 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2199 break;
2200 }
2201 }
2202
2203 tx_ring->producer = get_next_index(producer, num_txd);
2204
2205 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2206
b5e5492c
AKS
2207 if (qlcnic_mac_learn)
2208 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2209
af19b491
AKS
2210 qlcnic_update_cmd_producer(adapter, tx_ring);
2211
2212 adapter->stats.txbytes += skb->len;
2213 adapter->stats.xmitcalled++;
2214
2215 return NETDEV_TX_OK;
2216
2217drop_packet:
2218 adapter->stats.txdropped++;
2219 dev_kfree_skb_any(skb);
2220 return NETDEV_TX_OK;
2221}
2222
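/*
 * Read the board temperature state from CRB_TEMP_STATE.  Returns 1 when the
 * panic threshold has been crossed (the caller then detaches); the warning
 * and back-to-normal transitions are only logged.
 */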
2223static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2224{
2225 struct net_device *netdev = adapter->netdev;
2226 u32 temp, temp_state, temp_val;
2227 int rv = 0;
2228
2229 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2230
2231 temp_state = qlcnic_get_temp_state(temp);
2232 temp_val = qlcnic_get_temp_val(temp);
2233
2234 if (temp_state == QLCNIC_TEMP_PANIC) {
2235 dev_err(&netdev->dev,
2236 "Device temperature %d degrees C exceeds"
2237 " maximum allowed. Hardware has been shut down.\n",
2238 temp_val);
2239 rv = 1;
2240 } else if (temp_state == QLCNIC_TEMP_WARN) {
2241 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2242 dev_err(&netdev->dev,
2243 "Device temperature %d degrees C "
2244 "exceeds operating range."
2245 " Immediate action needed.\n",
2246 temp_val);
2247 }
2248 } else {
2249 if (adapter->temp == QLCNIC_TEMP_WARN) {
2250 dev_info(&netdev->dev,
2251 "Device temperature is now %d degrees C"
2252 " in normal range.\n", temp_val);
2253 }
2254 }
2255 adapter->temp = temp_state;
2256 return rv;
2257}
2258
2259void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2260{
2261 struct net_device *netdev = adapter->netdev;
2262
2263 if (adapter->ahw.linkup && !linkup) {
69324275 2264 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2265 adapter->ahw.linkup = 0;
2266 if (netif_running(netdev)) {
2267 netif_carrier_off(netdev);
2268 netif_stop_queue(netdev);
2269 }
2270 } else if (!adapter->ahw.linkup && linkup) {
69324275 2271 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2272 adapter->ahw.linkup = 1;
2273 if (netif_running(netdev)) {
2274 netif_carrier_on(netdev);
2275 netif_wake_queue(netdev);
2276 }
2277 }
2278}
2279
2280static void qlcnic_tx_timeout(struct net_device *netdev)
2281{
2282 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2283
2284 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2285 return;
2286
2287 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2288
2289 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2290 adapter->need_fw_reset = 1;
2291 else
2292 adapter->reset_context = 1;
af19b491
AKS
2293}
2294
2295static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2296{
2297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2298 struct net_device_stats *stats = &netdev->stats;
2299
af19b491
AKS
2300 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2301 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2302 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2303 stats->tx_bytes = adapter->stats.txbytes;
2304 stats->rx_dropped = adapter->stats.rxdropped;
2305 stats->tx_dropped = adapter->stats.txdropped;
2306
2307 return stats;
2308}
2309
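/*
 * Legacy INTx handling: confirm the interrupt belongs to this function via
 * its status vector bit and the interrupt state machine, then ack it by
 * writing the target status register and reading the vector back twice to
 * flush the posted write.
 */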
7eb9855d 2310static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2311{
af19b491
AKS
2312 u32 status;
2313
2314 status = readl(adapter->isr_int_vec);
2315
2316 if (!(status & adapter->int_vec_bit))
2317 return IRQ_NONE;
2318
2319 /* check interrupt state machine, to be sure */
2320 status = readl(adapter->crb_int_state_reg);
2321 if (!ISR_LEGACY_INT_TRIGGERED(status))
2322 return IRQ_NONE;
2323
2324 writel(0xffffffff, adapter->tgt_status_reg);
2325 /* read twice to ensure write is flushed */
2326 readl(adapter->isr_int_vec);
2327 readl(adapter->isr_int_vec);
2328
7eb9855d
AKS
2329 return IRQ_HANDLED;
2330}
2331
2332static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2333{
2334 struct qlcnic_host_sds_ring *sds_ring = data;
2335 struct qlcnic_adapter *adapter = sds_ring->adapter;
2336
2337 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2338 goto done;
2339 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2340 writel(0xffffffff, adapter->tgt_status_reg);
2341 goto done;
2342 }
2343
2344 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2345 return IRQ_NONE;
2346
2347done:
2348 adapter->diag_cnt++;
2349 qlcnic_enable_int(sds_ring);
2350 return IRQ_HANDLED;
2351}
2352
2353static irqreturn_t qlcnic_intr(int irq, void *data)
2354{
2355 struct qlcnic_host_sds_ring *sds_ring = data;
2356 struct qlcnic_adapter *adapter = sds_ring->adapter;
2357
2358 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2359 return IRQ_NONE;
2360
af19b491
AKS
2361 napi_schedule(&sds_ring->napi);
2362
2363 return IRQ_HANDLED;
2364}
2365
2366static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2367{
2368 struct qlcnic_host_sds_ring *sds_ring = data;
2369 struct qlcnic_adapter *adapter = sds_ring->adapter;
2370
2371 /* clear interrupt */
2372 writel(0xffffffff, adapter->tgt_status_reg);
2373
2374 napi_schedule(&sds_ring->napi);
2375 return IRQ_HANDLED;
2376}
2377
2378static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2379{
2380 struct qlcnic_host_sds_ring *sds_ring = data;
2381
2382 napi_schedule(&sds_ring->napi);
2383 return IRQ_HANDLED;
2384}
2385
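/*
 * Reclaim completed Tx descriptors between the software and hardware
 * consumer indexes: unmap the DMA buffers, free the skbs and wake the queue
 * once enough descriptors are available.  Returns non-zero when the ring has
 * been drained completely (or another CPU already holds the clean lock),
 * which lets the NAPI poll routine finish.
 */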
2386static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2387{
2388 u32 sw_consumer, hw_consumer;
2389 int count = 0, i;
2390 struct qlcnic_cmd_buffer *buffer;
2391 struct pci_dev *pdev = adapter->pdev;
2392 struct net_device *netdev = adapter->netdev;
2393 struct qlcnic_skb_frag *frag;
2394 int done;
2395 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2396
2397 if (!spin_trylock(&adapter->tx_clean_lock))
2398 return 1;
2399
2400 sw_consumer = tx_ring->sw_consumer;
2401 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2402
2403 while (sw_consumer != hw_consumer) {
2404 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2405 if (buffer->skb) {
2406 frag = &buffer->frag_array[0];
2407 pci_unmap_single(pdev, frag->dma, frag->length,
2408 PCI_DMA_TODEVICE);
2409 frag->dma = 0ULL;
2410 for (i = 1; i < buffer->frag_count; i++) {
2411 frag++;
2412 pci_unmap_page(pdev, frag->dma, frag->length,
2413 PCI_DMA_TODEVICE);
2414 frag->dma = 0ULL;
2415 }
2416
2417 adapter->stats.xmitfinished++;
2418 dev_kfree_skb_any(buffer->skb);
2419 buffer->skb = NULL;
2420 }
2421
2422 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2423 if (++count >= MAX_STATUS_HANDLE)
2424 break;
2425 }
2426
2427 if (count && netif_running(netdev)) {
2428 tx_ring->sw_consumer = sw_consumer;
2429
2430 smp_mb();
2431
2432 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2433 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2434 netif_wake_queue(netdev);
8bfe8b91 2435 adapter->stats.xmit_on++;
af19b491 2436 }
af19b491 2437 }
ef71ff83 2438 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2439 }
2440 /*
2441 * If everything is freed up to the consumer, check whether the ring is
2442 * full. If the ring is full, check whether more needs to be freed and
2443 * schedule the callback again.
2444 *
2445 * This happens when there are two CPUs: one could be freeing the ring
2446 * while the other is filling it. If the ring is full when we get out of
2447 * here and the card has already interrupted the host, the host can miss
2448 * that interrupt.
2449 *
2450 * There is still a possible race condition in which the host could miss
2451 * an interrupt. The card has to take care of this.
2452 */
2453 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2454 done = (sw_consumer == hw_consumer);
2455 spin_unlock(&adapter->tx_clean_lock);
2456
2457 return done;
2458}
2459
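/*
 * NAPI poll for the status ring that also owns Tx completions: reclaim the
 * command ring, process up to the budget of receive descriptors, and
 * re-enable the interrupt only when both Rx and Tx work is done.
 */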
2460static int qlcnic_poll(struct napi_struct *napi, int budget)
2461{
2462 struct qlcnic_host_sds_ring *sds_ring =
2463 container_of(napi, struct qlcnic_host_sds_ring, napi);
2464
2465 struct qlcnic_adapter *adapter = sds_ring->adapter;
2466
2467 int tx_complete;
2468 int work_done;
2469
2470 tx_complete = qlcnic_process_cmd_ring(adapter);
2471
2472 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2473
2474 if ((work_done < budget) && tx_complete) {
2475 napi_complete(&sds_ring->napi);
2476 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2477 qlcnic_enable_int(sds_ring);
2478 }
2479
2480 return work_done;
2481}
2482
8f891387 2483static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2484{
2485 struct qlcnic_host_sds_ring *sds_ring =
2486 container_of(napi, struct qlcnic_host_sds_ring, napi);
2487
2488 struct qlcnic_adapter *adapter = sds_ring->adapter;
2489 int work_done;
2490
2491 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2492
2493 if (work_done < budget) {
2494 napi_complete(&sds_ring->napi);
2495 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2496 qlcnic_enable_int(sds_ring);
2497 }
2498
2499 return work_done;
2500}
2501
af19b491
AKS
2502#ifdef CONFIG_NET_POLL_CONTROLLER
2503static void qlcnic_poll_controller(struct net_device *netdev)
2504{
bf82791e
YL
2505 int ring;
2506 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2507 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2508 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2509
af19b491 2510 disable_irq(adapter->irq);
bf82791e
YL
2511 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2512 sds_ring = &recv_ctx->sds_rings[ring];
2513 qlcnic_intr(adapter->irq, sds_ring);
2514 }
af19b491
AKS
2515 enable_irq(adapter->irq);
2516}
2517#endif
2518
6df900e9
SC
2519static void
2520qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2521{
2522 u32 val;
2523
2524 val = adapter->portnum & 0xf;
2525 val |= encoding << 7;
2526 val |= (jiffies - adapter->dev_rst_time) << 8;
2527
2528 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2529 adapter->dev_rst_time = jiffies;
2530}
2531
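/*
 * Acknowledge a reset or quiescent request by setting this function's
 * RST_RDY/QSCNT_RDY bit in the shared DRV_STATE register, under the api
 * lock.
 */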
ade91f8e
AKS
2532static int
2533qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2534{
2535 u32 val;
2536
2537 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2538 state != QLCNIC_DEV_NEED_QUISCENT);
2539
2540 if (qlcnic_api_lock(adapter))
ade91f8e 2541 return -EIO;
af19b491
AKS
2542
2543 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2544
2545 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2546 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2547 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2548 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2549
2550 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2551
2552 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2553
2554 return 0;
af19b491
AKS
2555}
2556
1b95a839
AKS
2557static int
2558qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2559{
2560 u32 val;
2561
2562 if (qlcnic_api_lock(adapter))
2563 return -EBUSY;
2564
2565 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2566 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2567 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2568
2569 qlcnic_api_unlock(adapter);
2570
2571 return 0;
2572}
2573
af19b491 2574static void
21854f02 2575qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2576{
2577 u32 val;
2578
2579 if (qlcnic_api_lock(adapter))
2580 goto err;
2581
31018e06 2582 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2583 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2584 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2585
21854f02
AKS
2586 if (failed) {
2587 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2588 dev_info(&adapter->pdev->dev,
2589 "Device state set to Failed. Please Reboot\n");
2590 } else if (!(val & 0x11111111))
af19b491
AKS
2591 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2592
2593 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2594 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2595 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2596
2597 qlcnic_api_unlock(adapter);
2598err:
2599 adapter->fw_fail_cnt = 0;
2600 clear_bit(__QLCNIC_START_FW, &adapter->state);
2601 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2602}
2603
f73dfc50 2604/* Grab api lock, before checking state */
af19b491
AKS
2605static int
2606qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2607{
2608 int act, state;
2609
2610 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2611 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2612
2613 if (((state & 0x11111111) == (act & 0x11111111)) ||
2614 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2615 return 0;
2616 else
2617 return 1;
2618}
2619
96f8118c
SC
2620static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2621{
2622 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2623
2624 if (val != QLCNIC_DRV_IDC_VER) {
2625 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2626 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2627 }
2628
2629 return 0;
2630}
2631
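/*
 * IDC handshake before loading firmware: register this function in
 * DRV_ACTIVE, then act on the device state.  COLD means this function owns
 * initialization, READY only needs an IDC version check, and NEED_RESET /
 * NEED_QUISCENT are acknowledged before waiting (up to dev_init_timeo
 * seconds) for the device to return to READY.  Returns 1 when this function
 * must load the firmware, 0 when it is already initialized, negative on
 * failure.
 */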
af19b491
AKS
2632static int
2633qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2634{
2635 u32 val, prev_state;
aa5e18c0 2636 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2637 u8 portnum = adapter->portnum;
96f8118c 2638 u8 ret;
af19b491 2639
f73dfc50
AKS
2640 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2641 return 1;
2642
af19b491
AKS
2643 if (qlcnic_api_lock(adapter))
2644 return -1;
2645
31018e06 2646 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2647 if (!(val & (1 << (portnum * 4)))) {
2648 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2649 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2650 }
2651
2652 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2653 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2654
2655 switch (prev_state) {
2656 case QLCNIC_DEV_COLD:
bbd8c6a4 2657 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2658 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2659 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2660 qlcnic_api_unlock(adapter);
2661 return 1;
2662
2663 case QLCNIC_DEV_READY:
96f8118c 2664 ret = qlcnic_check_idc_ver(adapter);
af19b491 2665 qlcnic_api_unlock(adapter);
96f8118c 2666 return ret;
af19b491
AKS
2667
2668 case QLCNIC_DEV_NEED_RESET:
2669 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2670 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2671 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2672 break;
2673
2674 case QLCNIC_DEV_NEED_QUISCENT:
2675 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2676 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2677 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2678 break;
2679
2680 case QLCNIC_DEV_FAILED:
a7fc948f 2681 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2682 qlcnic_api_unlock(adapter);
2683 return -1;
bbd8c6a4
AKS
2684
2685 case QLCNIC_DEV_INITIALIZING:
2686 case QLCNIC_DEV_QUISCENT:
2687 break;
af19b491
AKS
2688 }
2689
2690 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2691
2692 do {
af19b491 2693 msleep(1000);
a5e463d0
SC
2694 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2695
2696 if (prev_state == QLCNIC_DEV_QUISCENT)
2697 continue;
2698 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2699
65b5b420
AKS
2700 if (!dev_init_timeo) {
2701 dev_err(&adapter->pdev->dev,
2702 "Waiting for device to initialize timeout\n");
af19b491 2703 return -1;
65b5b420 2704 }
af19b491
AKS
2705
2706 if (qlcnic_api_lock(adapter))
2707 return -1;
2708
2709 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2710 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2711 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2712
96f8118c 2713 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2714 qlcnic_api_unlock(adapter);
2715
96f8118c 2716 return ret;
af19b491
AKS
2717}
2718
2719static void
2720qlcnic_fwinit_work(struct work_struct *work)
2721{
2722 struct qlcnic_adapter *adapter = container_of(work,
2723 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2724 u32 dev_state = 0xf;
af19b491 2725
f73dfc50
AKS
2726 if (qlcnic_api_lock(adapter))
2727 goto err_ret;
af19b491 2728
a5e463d0 2729 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2730 if (dev_state == QLCNIC_DEV_QUISCENT ||
2731 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2732 qlcnic_api_unlock(adapter);
2733 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2734 FW_POLL_DELAY * 2);
2735 return;
2736 }
2737
9f26f547 2738 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2739 qlcnic_api_unlock(adapter);
2740 goto wait_npar;
9f26f547
AC
2741 }
2742
f73dfc50
AKS
2743 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2744 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2745 adapter->reset_ack_timeo);
2746 goto skip_ack_check;
2747 }
2748
2749 if (!qlcnic_check_drv_state(adapter)) {
2750skip_ack_check:
2751 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2752
f73dfc50
AKS
2753 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2754 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2755 QLCNIC_DEV_INITIALIZING);
2756 set_bit(__QLCNIC_START_FW, &adapter->state);
2757 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2758 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2759 }
2760
f73dfc50
AKS
2761 qlcnic_api_unlock(adapter);
2762
9f26f547 2763 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2764 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2765 adapter->fw_wait_cnt = 0;
af19b491
AKS
2766 return;
2767 }
af19b491
AKS
2768 goto err_ret;
2769 }
2770
f73dfc50 2771 qlcnic_api_unlock(adapter);
aa5e18c0 2772
9f26f547 2773wait_npar:
af19b491 2774 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2775 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2776
af19b491 2777 switch (dev_state) {
3c4b23b1 2778 case QLCNIC_DEV_READY:
9f26f547 2779 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2780 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2781 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2782 return;
2783 }
3c4b23b1
AKS
2784 case QLCNIC_DEV_FAILED:
2785 break;
2786 default:
2787 qlcnic_schedule_work(adapter,
2788 qlcnic_fwinit_work, FW_POLL_DELAY);
2789 return;
af19b491
AKS
2790 }
2791
2792err_ret:
f73dfc50
AKS
2793 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2794 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2795 netif_device_attach(adapter->netdev);
21854f02 2796 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2797}
2798
2799static void
2800qlcnic_detach_work(struct work_struct *work)
2801{
2802 struct qlcnic_adapter *adapter = container_of(work,
2803 struct qlcnic_adapter, fw_work.work);
2804 struct net_device *netdev = adapter->netdev;
2805 u32 status;
2806
2807 netif_device_detach(netdev);
2808
b8c17620
AKS
2809 /* Don't grab the rtnl lock during quiescent mode */
2810 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2811 if (netif_running(netdev))
2812 __qlcnic_down(adapter, netdev);
2813 } else
2814 qlcnic_down(adapter, netdev);
af19b491 2815
af19b491
AKS
2816 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2817
2818 if (status & QLCNIC_RCODE_FATAL_ERROR)
2819 goto err_ret;
2820
2821 if (adapter->temp == QLCNIC_TEMP_PANIC)
2822 goto err_ret;
2823
ade91f8e
AKS
2824 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2825 goto err_ret;
af19b491
AKS
2826
2827 adapter->fw_wait_cnt = 0;
2828
2829 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2830
2831 return;
2832
2833err_ret:
65b5b420
AKS
2834 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2835 status, adapter->temp);
34ce3626 2836 netif_device_attach(netdev);
21854f02 2837 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2838}
2839
3c4b23b1
AKS
2840/* Transition NPAR state to non-operational */
2841static void
2842qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2843{
2844 u32 state;
2845
2846 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2847 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2848 return;
2849
2850 if (qlcnic_api_lock(adapter))
2851 return;
2852 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2853 qlcnic_api_unlock(adapter);
2854}
2855
f73dfc50 2856/*Transit to RESET state from READY state only */
af19b491
AKS
2857static void
2858qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2859{
2860 u32 state;
2861
cea8975e 2862 adapter->need_fw_reset = 1;
af19b491
AKS
2863 if (qlcnic_api_lock(adapter))
2864 return;
2865
2866 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2867
f73dfc50 2868 if (state == QLCNIC_DEV_READY) {
af19b491 2869 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2870 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2871 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2872 }
2873
3c4b23b1 2874 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2875 qlcnic_api_unlock(adapter);
2876}
2877
9f26f547
AC
2878/* Transition NPAR state from non-operational to operational */
2879static void
2880qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2881{
9f26f547
AC
2882 if (qlcnic_api_lock(adapter))
2883 return;
2884
3c4b23b1
AKS
2885 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2886 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2887
2888 qlcnic_api_unlock(adapter);
2889}
2890
af19b491
AKS
2891static void
2892qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2893 work_func_t func, int delay)
2894{
451724c8
SC
2895 if (test_bit(__QLCNIC_AER, &adapter->state))
2896 return;
2897
af19b491 2898 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2899 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2900 round_jiffies_relative(delay));
af19b491
AKS
2901}
2902
2903static void
2904qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2905{
2906 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2907 msleep(10);
2908
2909 cancel_delayed_work_sync(&adapter->fw_work);
2910}
2911
2912static void
2913qlcnic_attach_work(struct work_struct *work)
2914{
2915 struct qlcnic_adapter *adapter = container_of(work,
2916 struct qlcnic_adapter, fw_work.work);
2917 struct net_device *netdev = adapter->netdev;
b18971d1 2918 u32 npar_state;
af19b491 2919
b18971d1
AKS
2920 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2921 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2922 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2923 qlcnic_clr_all_drv_state(adapter, 0);
2924 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2925 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2926 FW_POLL_DELAY);
2927 else
2928 goto attach;
2929 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2930 return;
2931 }
2932attach:
af19b491 2933 if (netif_running(netdev)) {
52486a3a 2934 if (qlcnic_up(adapter, netdev))
af19b491 2935 goto done;
af19b491 2936
aec1e845 2937 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2938 }
2939
af19b491 2940done:
34ce3626 2941 netif_device_attach(netdev);
af19b491
AKS
2942 adapter->fw_fail_cnt = 0;
2943 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2944
2945 if (!qlcnic_clr_drv_state(adapter))
2946 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2947 FW_POLL_DELAY);
af19b491
AKS
2948}
2949
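/*
 * Periodic health check run from the firmware poll work: honour pending
 * reset requests, then compare the firmware heartbeat (PEG alive counter)
 * against the last sample.  If it stops advancing for FW_FAIL_THRESH polls,
 * a firmware hang is declared and, when auto_fw_reset is enabled, the
 * detach/recovery work is scheduled.  Returns 1 once recovery has been
 * initiated.
 */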
2950static int
2951qlcnic_check_health(struct qlcnic_adapter *adapter)
2952{
4e70812b 2953 u32 state = 0, heartbeat;
af19b491
AKS
2954 struct net_device *netdev = adapter->netdev;
2955
2956 if (qlcnic_check_temp(adapter))
2957 goto detach;
2958
2372a5f1 2959 if (adapter->need_fw_reset)
af19b491 2960 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2961
2962 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 2963 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 2964 qlcnic_set_npar_non_operational(adapter);
af19b491 2965 adapter->need_fw_reset = 1;
b8c17620
AKS
2966 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
2967 goto detach;
af19b491 2968
4e70812b
SC
2969 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2970 if (heartbeat != adapter->heartbeat) {
2971 adapter->heartbeat = heartbeat;
af19b491
AKS
2972 adapter->fw_fail_cnt = 0;
2973 if (adapter->need_fw_reset)
2974 goto detach;
68bf1c68 2975
9ce13ca8 2976 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
2977 qlcnic_reset_hw_context(adapter);
2978 adapter->netdev->trans_start = jiffies;
2979 }
2980
af19b491
AKS
2981 return 0;
2982 }
2983
2984 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2985 return 0;
2986
2987 qlcnic_dev_request_reset(adapter);
2988
9ce13ca8 2989 if (auto_fw_reset)
0df170b6 2990 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2991
2992 dev_info(&netdev->dev, "firmware hang detected\n");
2993
2994detach:
2995 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2996 QLCNIC_DEV_NEED_RESET;
2997
9ce13ca8 2998 if (auto_fw_reset &&
65b5b420
AKS
2999 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3000
af19b491 3001 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3002 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3003 }
af19b491
AKS
3004
3005 return 1;
3006}
3007
3008static void
3009qlcnic_fw_poll_work(struct work_struct *work)
3010{
3011 struct qlcnic_adapter *adapter = container_of(work,
3012 struct qlcnic_adapter, fw_work.work);
3013
3014 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3015 goto reschedule;
3016
3017
3018 if (qlcnic_check_health(adapter))
3019 return;
3020
b5e5492c
AKS
3021 if (adapter->fhash.fnum)
3022 qlcnic_prune_lb_filters(adapter);
3023
af19b491
AKS
3024reschedule:
3025 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3026}
3027
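/*
 * Return 1 when every lower-numbered PCI function in this slot is still in
 * D3cold, i.e. this is the first function being brought back up, e.g. when
 * recovering from an AER slot reset.
 */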
451724c8
SC
3028static int qlcnic_is_first_func(struct pci_dev *pdev)
3029{
3030 struct pci_dev *oth_pdev;
3031 int val = pdev->devfn;
3032
3033 while (val-- > 0) {
3034 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3035 (pdev->bus), pdev->bus->number,
3036 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3037 if (!oth_pdev)
3038 continue;
451724c8 3039
bfc978fa
AKS
3040 if (oth_pdev->current_state != PCI_D3cold) {
3041 pci_dev_put(oth_pdev);
451724c8 3042 return 0;
bfc978fa
AKS
3043 }
3044 pci_dev_put(oth_pdev);
451724c8
SC
3045 }
3046 return 1;
3047}
3048
3049static int qlcnic_attach_func(struct pci_dev *pdev)
3050{
3051 int err, first_func;
3052 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3053 struct net_device *netdev = adapter->netdev;
3054
3055 pdev->error_state = pci_channel_io_normal;
3056
3057 err = pci_enable_device(pdev);
3058 if (err)
3059 return err;
3060
3061 pci_set_power_state(pdev, PCI_D0);
3062 pci_set_master(pdev);
3063 pci_restore_state(pdev);
3064
3065 first_func = qlcnic_is_first_func(pdev);
3066
3067 if (qlcnic_api_lock(adapter))
3068 return -EINVAL;
3069
933fce12 3070 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3071 adapter->need_fw_reset = 1;
3072 set_bit(__QLCNIC_START_FW, &adapter->state);
3073 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3074 QLCDB(adapter, DRV, "Restarting fw\n");
3075 }
3076 qlcnic_api_unlock(adapter);
3077
3078 err = adapter->nic_ops->start_firmware(adapter);
3079 if (err)
3080 return err;
3081
3082 qlcnic_clr_drv_state(adapter);
3083 qlcnic_setup_intr(adapter);
3084
3085 if (netif_running(netdev)) {
3086 err = qlcnic_attach(adapter);
3087 if (err) {
21854f02 3088 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3089 clear_bit(__QLCNIC_AER, &adapter->state);
3090 netif_device_attach(netdev);
3091 return err;
3092 }
3093
3094 err = qlcnic_up(adapter, netdev);
3095 if (err)
3096 goto done;
3097
aec1e845 3098 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3099 }
3100 done:
3101 netif_device_attach(netdev);
3102 return err;
3103}
3104
3105static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3106 pci_channel_state_t state)
3107{
3108 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3109 struct net_device *netdev = adapter->netdev;
3110
3111 if (state == pci_channel_io_perm_failure)
3112 return PCI_ERS_RESULT_DISCONNECT;
3113
3114 if (state == pci_channel_io_normal)
3115 return PCI_ERS_RESULT_RECOVERED;
3116
3117 set_bit(__QLCNIC_AER, &adapter->state);
3118 netif_device_detach(netdev);
3119
3120 cancel_delayed_work_sync(&adapter->fw_work);
3121
3122 if (netif_running(netdev))
3123 qlcnic_down(adapter, netdev);
3124
3125 qlcnic_detach(adapter);
3126 qlcnic_teardown_intr(adapter);
3127
3128 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3129
3130 pci_save_state(pdev);
3131 pci_disable_device(pdev);
3132
3133 return PCI_ERS_RESULT_NEED_RESET;
3134}
3135
3136static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3137{
3138 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3139 PCI_ERS_RESULT_RECOVERED;
3140}
3141
3142static void qlcnic_io_resume(struct pci_dev *pdev)
3143{
3144 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3145
3146 pci_cleanup_aer_uncorrect_error_status(pdev);
3147
3148 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3149 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3150 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3151 FW_POLL_DELAY);
3152}
3153
87eb743b
AC
3154static int
3155qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3156{
3157 int err;
3158
3159 err = qlcnic_can_start_firmware(adapter);
3160 if (err)
3161 return err;
3162
78f84e1a
AKS
3163 err = qlcnic_check_npar_opertional(adapter);
3164 if (err)
3165 return err;
3c4b23b1 3166
174240a8
RB
3167 err = qlcnic_initialize_nic(adapter);
3168 if (err)
3169 return err;
3170
87eb743b
AC
3171 qlcnic_check_options(adapter);
3172
7373373d
RB
3173 err = qlcnic_set_eswitch_port_config(adapter);
3174 if (err)
3175 return err;
3176
87eb743b
AC
3177 adapter->need_fw_reset = 0;
3178
3179 return err;
3180}
3181
3182static int
3183qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3184{
3185 return -EOPNOTSUPP;
3186}
3187
3188static int
3189qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3190{
3191 return -EOPNOTSUPP;
3192}
3193
af19b491
AKS
3194static ssize_t
3195qlcnic_store_bridged_mode(struct device *dev,
3196 struct device_attribute *attr, const char *buf, size_t len)
3197{
3198 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3199 unsigned long new;
3200 int ret = -EINVAL;
3201
3202 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3203 goto err_out;
3204
8a15ad1f 3205 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3206 goto err_out;
3207
3208 if (strict_strtoul(buf, 2, &new))
3209 goto err_out;
3210
2e9d722d 3211 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3212 ret = len;
3213
3214err_out:
3215 return ret;
3216}
3217
3218static ssize_t
3219qlcnic_show_bridged_mode(struct device *dev,
3220 struct device_attribute *attr, char *buf)
3221{
3222 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3223 int bridged_mode = 0;
3224
3225 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3226 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3227
3228 return sprintf(buf, "%d\n", bridged_mode);
3229}
3230
3231static struct device_attribute dev_attr_bridged_mode = {
3232 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3233 .show = qlcnic_show_bridged_mode,
3234 .store = qlcnic_store_bridged_mode,
3235};
3236
3237static ssize_t
3238qlcnic_store_diag_mode(struct device *dev,
3239 struct device_attribute *attr, const char *buf, size_t len)
3240{
3241 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3242 unsigned long new;
3243
3244 if (strict_strtoul(buf, 2, &new))
3245 return -EINVAL;
3246
3247 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3248 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3249
3250 return len;
3251}
3252
3253static ssize_t
3254qlcnic_show_diag_mode(struct device *dev,
3255 struct device_attribute *attr, char *buf)
3256{
3257 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3258
3259 return sprintf(buf, "%d\n",
3260 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3261}
3262
3263static struct device_attribute dev_attr_diag_mode = {
3264 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3265 .show = qlcnic_show_diag_mode,
3266 .store = qlcnic_store_diag_mode,
3267};
3268
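/*
 * "crb" sysfs attribute helpers: accesses are only allowed in diag mode and
 * must be naturally aligned -- 4 bytes for CRB registers, 8 bytes for the
 * CAMQM window below QLCNIC_PCI_CRBSPACE.
 */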
3269static int
3270qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3271 loff_t offset, size_t size)
3272{
897e8c7c
DP
3273 size_t crb_size = 4;
3274
af19b491
AKS
3275 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3276 return -EIO;
3277
897e8c7c
DP
3278 if (offset < QLCNIC_PCI_CRBSPACE) {
3279 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3280 QLCNIC_PCI_CAMQM_END))
3281 crb_size = 8;
3282 else
3283 return -EINVAL;
3284 }
af19b491 3285
897e8c7c
DP
3286 if ((size != crb_size) || (offset & (crb_size-1)))
3287 return -EINVAL;
af19b491
AKS
3288
3289 return 0;
3290}
3291
3292static ssize_t
2c3c8bea
CW
3293qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3294 struct bin_attribute *attr,
af19b491
AKS
3295 char *buf, loff_t offset, size_t size)
3296{
3297 struct device *dev = container_of(kobj, struct device, kobj);
3298 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3299 u32 data;
897e8c7c 3300 u64 qmdata;
af19b491
AKS
3301 int ret;
3302
3303 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3304 if (ret != 0)
3305 return ret;
3306
897e8c7c
DP
3307 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3308 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3309 memcpy(buf, &qmdata, size);
3310 } else {
3311 data = QLCRD32(adapter, offset);
3312 memcpy(buf, &data, size);
3313 }
af19b491
AKS
3314 return size;
3315}
3316
3317static ssize_t
2c3c8bea
CW
3318qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3319 struct bin_attribute *attr,
af19b491
AKS
3320 char *buf, loff_t offset, size_t size)
3321{
3322 struct device *dev = container_of(kobj, struct device, kobj);
3323 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3324 u32 data;
897e8c7c 3325 u64 qmdata;
af19b491
AKS
3326 int ret;
3327
3328 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3329 if (ret != 0)
3330 return ret;
3331
897e8c7c
DP
3332 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3333 memcpy(&qmdata, buf, size);
3334 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3335 } else {
3336 memcpy(&data, buf, size);
3337 QLCWR32(adapter, offset, data);
3338 }
af19b491
AKS
3339 return size;
3340}
3341
3342static int
3343qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3344 loff_t offset, size_t size)
3345{
3346 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3347 return -EIO;
3348
3349 if ((size != 8) || (offset & 0x7))
3350 return -EIO;
3351
3352 return 0;
3353}
3354
3355static ssize_t
2c3c8bea
CW
3356qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3357 struct bin_attribute *attr,
af19b491
AKS
3358 char *buf, loff_t offset, size_t size)
3359{
3360 struct device *dev = container_of(kobj, struct device, kobj);
3361 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3362 u64 data;
3363 int ret;
3364
3365 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3366 if (ret != 0)
3367 return ret;
3368
3369 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3370 return -EIO;
3371
3372 memcpy(buf, &data, size);
3373
3374 return size;
3375}
3376
3377static ssize_t
2c3c8bea
CW
3378qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3379 struct bin_attribute *attr,
af19b491
AKS
3380 char *buf, loff_t offset, size_t size)
3381{
3382 struct device *dev = container_of(kobj, struct device, kobj);
3383 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3384 u64 data;
3385 int ret;
3386
3387 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3388 if (ret != 0)
3389 return ret;
3390
3391 memcpy(&data, buf, size);
3392
3393 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3394 return -EIO;
3395
3396 return size;
3397}
3398
3399
3400static struct bin_attribute bin_attr_crb = {
3401 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3402 .size = 0,
3403 .read = qlcnic_sysfs_read_crb,
3404 .write = qlcnic_sysfs_write_crb,
3405};
3406
3407static struct bin_attribute bin_attr_mem = {
3408 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3409 .size = 0,
3410 .read = qlcnic_sysfs_read_mem,
3411 .write = qlcnic_sysfs_write_mem,
3412};
3413
cea8975e 3414static int
346fe763
RB
3415validate_pm_config(struct qlcnic_adapter *adapter,
3416 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3417{
3418
3419 u8 src_pci_func, s_esw_id, d_esw_id;
3420 u8 dest_pci_func;
3421 int i;
3422
3423 for (i = 0; i < count; i++) {
3424 src_pci_func = pm_cfg[i].pci_func;
3425 dest_pci_func = pm_cfg[i].dest_npar;
3426 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3427 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3428 return QL_STATUS_INVALID_PARAM;
3429
3430 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3431 return QL_STATUS_INVALID_PARAM;
3432
3433 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3434 return QL_STATUS_INVALID_PARAM;
3435
346fe763
RB
3436 s_esw_id = adapter->npars[src_pci_func].phy_port;
3437 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3438
3439 if (s_esw_id != d_esw_id)
3440 return QL_STATUS_INVALID_PARAM;
3441
3442 }
3443 return 0;
3444
3445}
3446
3447static ssize_t
3448qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3449 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3450{
3451 struct device *dev = container_of(kobj, struct device, kobj);
3452 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3453 struct qlcnic_pm_func_cfg *pm_cfg;
3454 u32 id, action, pci_func;
3455 int count, rem, i, ret;
3456
3457 count = size / sizeof(struct qlcnic_pm_func_cfg);
3458 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3459 if (rem)
3460 return QL_STATUS_INVALID_PARAM;
3461
3462 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3463
3464 ret = validate_pm_config(adapter, pm_cfg, count);
3465 if (ret)
3466 return ret;
3467 for (i = 0; i < count; i++) {
3468 pci_func = pm_cfg[i].pci_func;
4e8acb01 3469 action = !!pm_cfg[i].action;
346fe763
RB
3470 id = adapter->npars[pci_func].phy_port;
3471 ret = qlcnic_config_port_mirroring(adapter, id,
3472 action, pci_func);
3473 if (ret)
3474 return ret;
3475 }
3476
3477 for (i = 0; i < count; i++) {
3478 pci_func = pm_cfg[i].pci_func;
3479 id = adapter->npars[pci_func].phy_port;
4e8acb01 3480 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3481 adapter->npars[pci_func].dest_npar = id;
3482 }
3483 return size;
3484}
3485
3486static ssize_t
3487qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3488 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3489{
3490 struct device *dev = container_of(kobj, struct device, kobj);
3491 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3492 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3493 int i;
3494
3495 if (size != sizeof(pm_cfg))
3496 return QL_STATUS_INVALID_PARAM;
3497
3498 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3499 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3500 continue;
3501 pm_cfg[i].action = adapter->npars[i].enable_pm;
3502 pm_cfg[i].dest_npar = 0;
3503 pm_cfg[i].pci_func = i;
3504 }
3505 memcpy(buf, &pm_cfg, size);
3506
3507 return size;
3508}
3509
cea8975e 3510static int
346fe763 3511validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3512 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3513{
7613c87b 3514 u32 op_mode;
346fe763
RB
3515 u8 pci_func;
3516 int i;
7613c87b
RB
3517
3518 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3519
346fe763
RB
3520 for (i = 0; i < count; i++) {
3521 pci_func = esw_cfg[i].pci_func;
3522 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3523 return QL_STATUS_INVALID_PARAM;
3524
4e8acb01
RB
3525 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3526 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3527 return QL_STATUS_INVALID_PARAM;
346fe763 3528
4e8acb01
RB
3529 switch (esw_cfg[i].op_mode) {
3530 case QLCNIC_PORT_DEFAULTS:
7613c87b 3531 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3532 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3533 if (esw_cfg[i].mac_anti_spoof != 0)
3534 return QL_STATUS_INVALID_PARAM;
3535 if (esw_cfg[i].mac_override != 1)
3536 return QL_STATUS_INVALID_PARAM;
3537 if (esw_cfg[i].promisc_mode != 1)
3538 return QL_STATUS_INVALID_PARAM;
7373373d 3539 }
4e8acb01
RB
3540 break;
3541 case QLCNIC_ADD_VLAN:
346fe763
RB
3542 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3543 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3544 if (!esw_cfg[i].op_type)
3545 return QL_STATUS_INVALID_PARAM;
3546 break;
3547 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3548 if (!esw_cfg[i].op_type)
3549 return QL_STATUS_INVALID_PARAM;
3550 break;
3551 default:
346fe763 3552 return QL_STATUS_INVALID_PARAM;
4e8acb01 3553 }
346fe763 3554 }
346fe763
RB
3555 return 0;
3556}
3557
3558static ssize_t
3559qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3560 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3561{
3562 struct device *dev = container_of(kobj, struct device, kobj);
3563 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3564 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3565 struct qlcnic_npar_info *npar;
346fe763 3566 int count, rem, i, ret;
0325d69b 3567 u8 pci_func, op_mode = 0;
346fe763
RB
3568
3569 count = size / sizeof(struct qlcnic_esw_func_cfg);
3570 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3571 if (rem)
3572 return QL_STATUS_INVALID_PARAM;
3573
3574 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3575 ret = validate_esw_config(adapter, esw_cfg, count);
3576 if (ret)
3577 return ret;
3578
3579 for (i = 0; i < count; i++) {
0325d69b
RB
3580 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3581 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3582 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3583
3584 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3585 continue;
3586
3587 op_mode = esw_cfg[i].op_mode;
3588 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3589 esw_cfg[i].op_mode = op_mode;
3590 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3591
3592 switch (esw_cfg[i].op_mode) {
3593 case QLCNIC_PORT_DEFAULTS:
3594 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3595 break;
8cf61f89
AKS
3596 case QLCNIC_ADD_VLAN:
3597 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3598 break;
3599 case QLCNIC_DEL_VLAN:
3600 esw_cfg[i].vlan_id = 0;
3601 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3602 break;
0325d69b 3603 }
346fe763
RB
3604 }
3605
0325d69b
RB
3606 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3607 goto out;
e9a47700 3608
346fe763
RB
3609 for (i = 0; i < count; i++) {
3610 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3611 npar = &adapter->npars[pci_func];
3612 switch (esw_cfg[i].op_mode) {
3613 case QLCNIC_PORT_DEFAULTS:
3614 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3615 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3616 npar->offload_flags = esw_cfg[i].offload_flags;
3617 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3618 npar->discard_tagged = esw_cfg[i].discard_tagged;
3619 break;
3620 case QLCNIC_ADD_VLAN:
3621 npar->pvid = esw_cfg[i].vlan_id;
3622 break;
3623 case QLCNIC_DEL_VLAN:
3624 npar->pvid = 0;
3625 break;
3626 }
346fe763 3627 }
0325d69b 3628out:
346fe763
RB
3629 return size;
3630}
3631
3632static ssize_t
3633qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3634 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3635{
3636 struct device *dev = container_of(kobj, struct device, kobj);
3637 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3638 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3639 u8 i;
346fe763
RB
3640
3641 if (size != sizeof(esw_cfg))
3642 return QL_STATUS_INVALID_PARAM;
3643
3644 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3645 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3646 continue;
4e8acb01
RB
3647 esw_cfg[i].pci_func = i;
3648 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3649 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3650 }
3651 memcpy(buf, &esw_cfg, size);
3652
3653 return size;
3654}
3655
cea8975e 3656static int
346fe763
RB
3657validate_npar_config(struct qlcnic_adapter *adapter,
3658 struct qlcnic_npar_func_cfg *np_cfg, int count)
3659{
3660 u8 pci_func, i;
3661
3662 for (i = 0; i < count; i++) {
3663 pci_func = np_cfg[i].pci_func;
3664 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3665 return QL_STATUS_INVALID_PARAM;
3666
3667 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3668 return QL_STATUS_INVALID_PARAM;
3669
d12b0d9a
RB
3670 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3671 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3672 return QL_STATUS_INVALID_PARAM;
3673 }
3674 return 0;
3675}
3676
3677static ssize_t
3678qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3679 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3680{
3681 struct device *dev = container_of(kobj, struct device, kobj);
3682 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3683 struct qlcnic_info nic_info;
3684 struct qlcnic_npar_func_cfg *np_cfg;
3685 int i, count, rem, ret;
3686 u8 pci_func;
3687
3688 count = size / sizeof(struct qlcnic_npar_func_cfg);
3689 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3690 if (rem)
3691 return QL_STATUS_INVALID_PARAM;
3692
3693 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3694 ret = validate_npar_config(adapter, np_cfg, count);
3695 if (ret)
3696 return ret;
3697
3698 for (i = 0; i < count ; i++) {
3699 pci_func = np_cfg[i].pci_func;
3700 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3701 if (ret)
3702 return ret;
3703 nic_info.pci_func = pci_func;
3704 nic_info.min_tx_bw = np_cfg[i].min_bw;
3705 nic_info.max_tx_bw = np_cfg[i].max_bw;
3706 ret = qlcnic_set_nic_info(adapter, &nic_info);
3707 if (ret)
3708 return ret;
cea8975e
AC
3709 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3710 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3711 }
3712
3713 return size;
3714
3715}
3716static ssize_t
3717qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3718 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3719{
3720 struct device *dev = container_of(kobj, struct device, kobj);
3721 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3722 struct qlcnic_info nic_info;
3723 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3724 int i, ret;
3725
3726 if (size != sizeof(np_cfg))
3727 return QL_STATUS_INVALID_PARAM;
3728
3729 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3730 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3731 continue;
3732 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3733 if (ret)
3734 return ret;
3735
3736 np_cfg[i].pci_func = i;
a1c0c459 3737 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3738 np_cfg[i].port_num = nic_info.phys_port;
3739 np_cfg[i].fw_capab = nic_info.capabilities;
3740 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3741 np_cfg[i].max_bw = nic_info.max_tx_bw;
3742 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3743 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3744 }
3745 memcpy(buf, &np_cfg, size);
3746 return size;
3747}
3748
b6021212
AKS
3749static ssize_t
3750qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3751 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3752{
3753 struct device *dev = container_of(kobj, struct device, kobj);
3754 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3755 struct qlcnic_esw_statistics port_stats;
3756 int ret;
3757
3758 if (size != sizeof(struct qlcnic_esw_statistics))
3759 return QL_STATUS_INVALID_PARAM;
3760
3761 if (offset >= QLCNIC_MAX_PCI_FUNC)
3762 return QL_STATUS_INVALID_PARAM;
3763
3764 memset(&port_stats, 0, size);
3765 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3766 &port_stats.rx);
3767 if (ret)
3768 return ret;
3769
3770 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3771 &port_stats.tx);
3772 if (ret)
3773 return ret;
3774
3775 memcpy(buf, &port_stats, size);
3776 return size;
3777}
3778
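/*
 * "esw_stats" read handler: same layout as port_stats, but the offset
 * selects an eswitch/uplink port (0..QLCNIC_NIU_MAX_XG_PORTS-1) instead of
 * a PCI function.
 */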
3779static ssize_t
3780qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3781 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3782{
3783 struct device *dev = container_of(kobj, struct device, kobj);
3784 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3785 struct qlcnic_esw_statistics esw_stats;
3786 int ret;
3787
3788 if (size != sizeof(struct qlcnic_esw_statistics))
3789 return QL_STATUS_INVALID_PARAM;
3790
3791 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3792 return QL_STATUS_INVALID_PARAM;
3793
3794 memset(&esw_stats, 0, size);
3795 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3796 &esw_stats.rx);
3797 if (ret)
3798 return ret;
3799
3800 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3801 &esw_stats.tx);
3802 if (ret)
3803 return ret;
3804
3805 memcpy(buf, &esw_stats, size);
3806 return size;
3807}
3808
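/*
 * "esw_stats" write handler: any write clears the RX and TX counters of
 * the eswitch selected by the file offset; the written data is ignored.
 */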
3809static ssize_t
3810qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3811 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3812{
3813 struct device *dev = container_of(kobj, struct device, kobj);
3814 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3815 int ret;
3816
3817 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3818 return QL_STATUS_INVALID_PARAM;
3819
3820 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3821 QLCNIC_QUERY_RX_COUNTER);
3822 if (ret)
3823 return ret;
3824
3825 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3826 QLCNIC_QUERY_TX_COUNTER);
3827 if (ret)
3828 return ret;
3829
3830 return size;
3831}
3832
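/*
 * "port_stats" write handler: any write clears the RX and TX counters of
 * the PCI function selected by the file offset; the written data is
 * ignored.
 */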
3833static ssize_t
3834qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3835 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3836{
3837
3838 struct device *dev = container_of(kobj, struct device, kobj);
3839 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3840 int ret;
3841
3842 if (offset >= QLCNIC_MAX_PCI_FUNC)
3843 return QL_STATUS_INVALID_PARAM;
3844
3845 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3846 QLCNIC_QUERY_RX_COUNTER);
3847 if (ret)
3848 return ret;
3849
3850 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3851 QLCNIC_QUERY_TX_COUNTER);
3852 if (ret)
3853 return ret;
3854
3855 return size;
3856}
3857
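/*
 * "pci_config" read handler: returns the PCI function table (id, type,
 * default port, TX bandwidth limits and MAC address) as reported by
 * firmware.  The qlcnic_pci_info array is allocated with kcalloc(),
 * presumably to keep the handler's stack footprint small.
 */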
3858static ssize_t
3859qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3860 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3861{
3862 struct device *dev = container_of(kobj, struct device, kobj);
3863 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3864 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3865 struct qlcnic_pci_info *pci_info;
3866 int i, ret;
3867
3868 if (size != sizeof(pci_cfg))
3869 return QL_STATUS_INVALID_PARAM;
3870
3871 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3872 if (!pci_info)
3873 return -ENOMEM;
3874
346fe763 3875 ret = qlcnic_get_pci_info(adapter, pci_info);
3876 if (ret) {
3877 kfree(pci_info);
346fe763 3878 return ret;
e88db3bd 3879 }
3880
3881 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3882 pci_cfg[i].pci_func = pci_info[i].id;
3883 pci_cfg[i].func_type = pci_info[i].type;
3884 pci_cfg[i].port_num = pci_info[i].default_port;
3885 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3886 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3887 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3888 }
3889 memcpy(buf, &pci_cfg, size);
e88db3bd 3890 kfree(pci_info);
346fe763 3891 return size;
3892}
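/*
 * Binary sysfs attributes exported on the adapter's PCI device.  Which of
 * them are actually created depends on the function's operating mode and
 * eswitch capability; see qlcnic_create_diag_entries() below.
 */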
3893static struct bin_attribute bin_attr_npar_config = {
3894 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3895 .size = 0,
3896 .read = qlcnic_sysfs_read_npar_config,
3897 .write = qlcnic_sysfs_write_npar_config,
3898};
3899
3900static struct bin_attribute bin_attr_pci_config = {
3901 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3902 .size = 0,
3903 .read = qlcnic_sysfs_read_pci_config,
3904 .write = NULL,
3905};
3906
3907static struct bin_attribute bin_attr_port_stats = {
3908 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3909 .size = 0,
3910 .read = qlcnic_sysfs_get_port_stats,
3911 .write = qlcnic_sysfs_clear_port_stats,
3912};
3913
3914static struct bin_attribute bin_attr_esw_stats = {
3915 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3916 .size = 0,
3917 .read = qlcnic_sysfs_get_esw_stats,
3918 .write = qlcnic_sysfs_clear_esw_stats,
3919};
3920
3921static struct bin_attribute bin_attr_esw_config = {
3922 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3923 .size = 0,
3924 .read = qlcnic_sysfs_read_esw_config,
3925 .write = qlcnic_sysfs_write_esw_config,
3926};
3927
3928static struct bin_attribute bin_attr_pm_config = {
3929 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3930 .size = 0,
3931 .read = qlcnic_sysfs_read_pm_config,
3932 .write = qlcnic_sysfs_write_pm_config,
3933};
3934
3935static void
3936qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3937{
3938 struct device *dev = &adapter->pdev->dev;
3939
3940 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3941 if (device_create_file(dev, &dev_attr_bridged_mode))
3942 dev_warn(dev,
3943 "failed to create bridged_mode sysfs entry\n");
3944}
3945
3946static void
3947qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3948{
3949 struct device *dev = &adapter->pdev->dev;
3950
3951 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3952 device_remove_file(dev, &dev_attr_bridged_mode);
3953}
3954
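/*
 * Register the diagnostic/management sysfs nodes: port_stats is always
 * created; non-privileged functions stop there.  Privileged functions add
 * diag_mode, crb and mem; eswitch-capable adapters add esw_config; and
 * only the management function gets pci_config, npar_config, pm_config
 * and esw_stats.
 */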
3955static void
3956qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3957{
3958 struct device *dev = &adapter->pdev->dev;
3959
3960 if (device_create_bin_file(dev, &bin_attr_port_stats))
3961 dev_info(dev, "failed to create port stats sysfs entry\n");
3962
3963 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3964 return;
3965 if (device_create_file(dev, &dev_attr_diag_mode))
3966 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3967 if (device_create_bin_file(dev, &bin_attr_crb))
3968 dev_info(dev, "failed to create crb sysfs entry\n");
3969 if (device_create_bin_file(dev, &bin_attr_mem))
3970 dev_info(dev, "failed to create mem sysfs entry\n");
3971 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3972 return;
3973 if (device_create_bin_file(dev, &bin_attr_esw_config))
3974 dev_info(dev, "failed to create esw config sysfs entry\n");
3975 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3976 return;
3977 if (device_create_bin_file(dev, &bin_attr_pci_config))
3978 dev_info(dev, "failed to create pci config sysfs entry\n");
3979 if (device_create_bin_file(dev, &bin_attr_npar_config))
3980 dev_info(dev, "failed to create npar config sysfs entry\n");
3981 if (device_create_bin_file(dev, &bin_attr_pm_config))
3982 dev_info(dev, "failed to create pm config sysfs entry\n");
3983 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3984 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
3985}
3986
3987static void
3988qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3989{
3990 struct device *dev = &adapter->pdev->dev;
3991
3992 device_remove_bin_file(dev, &bin_attr_port_stats);
3993
3994 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3995 return;
3996 device_remove_file(dev, &dev_attr_diag_mode);
3997 device_remove_bin_file(dev, &bin_attr_crb);
3998 device_remove_bin_file(dev, &bin_attr_mem);
3999 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4000 return;
4001 device_remove_bin_file(dev, &bin_attr_esw_config);
4002 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4003 return;
4004 device_remove_bin_file(dev, &bin_attr_pci_config);
4005 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4006 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4007 device_remove_bin_file(dev, &bin_attr_esw_stats);
4008}
4009
4010#ifdef CONFIG_INET
4011
4012#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4013
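/*
 * Walk the IPv4 addresses configured on @dev and, via qlcnic_config_ipaddr(),
 * program each one into the firmware on NETDEV_UP or remove it on
 * NETDEV_DOWN.
 */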
af19b491 4014static void
4015qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4016 struct net_device *dev, unsigned long event)
4017{
4018 struct in_device *indev;
af19b491 4019
4020 indev = in_dev_get(dev);
4021 if (!indev)
4022 return;
4023
4024 for_ifa(indev) {
4025 switch (event) {
4026 case NETDEV_UP:
4027 qlcnic_config_ipaddr(adapter,
4028 ifa->ifa_address, QLCNIC_IP_UP);
4029 break;
4030 case NETDEV_DOWN:
4031 qlcnic_config_ipaddr(adapter,
4032 ifa->ifa_address, QLCNIC_IP_DOWN);
4033 break;
4034 default:
4035 break;
4036 }
4037 } endfor_ifa(indev);
4038
4039 in_dev_put(indev);
4040}
4041
4042static void
4043qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4044{
4045 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4046 struct net_device *dev;
4047 u16 vid;
4048
4049 qlcnic_config_indev_addr(adapter, netdev, event);
4050
4051 if (!adapter->vlgrp)
4052 return;
4053
b738127d 4054 for (vid = 0; vid < VLAN_N_VID; vid++) {
4055 dev = vlan_group_get_device(adapter->vlgrp, vid);
4056 if (!dev)
4057 continue;
4058
4059 qlcnic_config_indev_addr(adapter, dev, event);
4060 }
4061}
4062
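/*
 * Netdevice notifier: map VLAN devices back to their real device, ignore
 * netdevs that are not qlcnic interfaces or whose adapter is not up, and
 * replay the event for every IPv4 address on the device.
 */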
4063static int qlcnic_netdev_event(struct notifier_block *this,
4064 unsigned long event, void *ptr)
4065{
4066 struct qlcnic_adapter *adapter;
4067 struct net_device *dev = (struct net_device *)ptr;
4068
4069recheck:
4070 if (dev == NULL)
4071 goto done;
4072
4073 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4074 dev = vlan_dev_real_dev(dev);
4075 goto recheck;
4076 }
4077
4078 if (!is_qlcnic_netdev(dev))
4079 goto done;
4080
4081 adapter = netdev_priv(dev);
4082
4083 if (!adapter)
4084 goto done;
4085
8a15ad1f 4086 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4087 goto done;
4088
aec1e845 4089 qlcnic_config_indev_addr(adapter, dev, event);
4090done:
4091 return NOTIFY_DONE;
4092}
4093
4094static int
4095qlcnic_inetaddr_event(struct notifier_block *this,
4096 unsigned long event, void *ptr)
4097{
4098 struct qlcnic_adapter *adapter;
4099 struct net_device *dev;
4100
4101 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4102
4103 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4104
4105recheck:
aec1e845 4106 if (dev == NULL)
4107 goto done;
4108
4109 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4110 dev = vlan_dev_real_dev(dev);
4111 goto recheck;
4112 }
4113
4114 if (!is_qlcnic_netdev(dev))
4115 goto done;
4116
4117 adapter = netdev_priv(dev);
4118
251a84c9 4119 if (!adapter)
4120 goto done;
4121
8a15ad1f 4122 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4123 goto done;
4124
4125 switch (event) {
4126 case NETDEV_UP:
4127 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4128 break;
4129 case NETDEV_DOWN:
4130 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4131 break;
4132 default:
4133 break;
4134 }
4135
4136done:
4137 return NOTIFY_DONE;
4138}
4139
4140static struct notifier_block qlcnic_netdev_cb = {
4141 .notifier_call = qlcnic_netdev_event,
4142};
4143
4144static struct notifier_block qlcnic_inetaddr_cb = {
4145 .notifier_call = qlcnic_inetaddr_event,
4146};
4147#else
4148static void
aec1e845 4149qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4150{ }
4151#endif
4152static struct pci_error_handlers qlcnic_err_handler = {
4153 .error_detected = qlcnic_io_error_detected,
4154 .slot_reset = qlcnic_io_slot_reset,
4155 .resume = qlcnic_io_resume,
4156};
4157
4158static struct pci_driver qlcnic_driver = {
4159 .name = qlcnic_driver_name,
4160 .id_table = qlcnic_pci_tbl,
4161 .probe = qlcnic_probe,
4162 .remove = __devexit_p(qlcnic_remove),
4163#ifdef CONFIG_PM
4164 .suspend = qlcnic_suspend,
4165 .resume = qlcnic_resume,
4166#endif
4167 .shutdown = qlcnic_shutdown,
4168 .err_handler = &qlcnic_err_handler
4169
4170};
4171
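/*
 * Module init: create the driver workqueue, register the netdevice and
 * inetaddr notifiers (when CONFIG_INET is set), then register the PCI
 * driver, unwinding the earlier steps if registration fails.
 */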
4172static int __init qlcnic_init_module(void)
4173{
0cf3a14c 4174 int ret;
4175
4176 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4177
4178 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4179 if (qlcnic_wq == NULL) {
4180 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4181 return -ENOMEM;
4182 }
4183
4184#ifdef CONFIG_INET
4185 register_netdevice_notifier(&qlcnic_netdev_cb);
4186 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4187#endif
4188
4189 ret = pci_register_driver(&qlcnic_driver);
4190 if (ret) {
4191#ifdef CONFIG_INET
4192 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4193 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4194#endif
f7ec804a 4195 destroy_workqueue(qlcnic_wq);
0cf3a14c 4196 }
af19b491 4197
0cf3a14c 4198 return ret;
4199}
4200
4201module_init(qlcnic_init_module);
4202
4203static void __exit qlcnic_exit_module(void)
4204{
4205
4206 pci_unregister_driver(&qlcnic_driver);
4207
4208#ifdef CONFIG_INET
4209 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4210 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4211#endif
f7ec804a 4212 destroy_workqueue(qlcnic_wq);
4213}
4214
4215module_exit(qlcnic_exit_module);