drivers/net: return operator cleanup
drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
5a0e3ad6 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
451724c8 37#include <linux/aer.h>
af19b491 38
7f9a0c34 39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int qlcnic_mac_learn;
54module_param(qlcnic_mac_learn, int, 0644);
55MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
56
57static int use_msi = 1;
58module_param(use_msi, int, 0644);
 59MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
60
61static int use_msi_x = 1;
62module_param(use_msi_x, int, 0644);
 63MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
64
65static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
66module_param(auto_fw_reset, int, 0644);
 67MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
68
69static int load_fw_file;
70module_param(load_fw_file, int, 0644);
 71MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
72
73static int qlcnic_config_npars;
74module_param(qlcnic_config_npars, int, 0644);
 75MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
76
77static int __devinit qlcnic_probe(struct pci_dev *pdev,
78 const struct pci_device_id *ent);
79static void __devexit qlcnic_remove(struct pci_dev *pdev);
80static int qlcnic_open(struct net_device *netdev);
81static int qlcnic_close(struct net_device *netdev);
af19b491 82static void qlcnic_tx_timeout(struct net_device *netdev);
83static void qlcnic_attach_work(struct work_struct *work);
84static void qlcnic_fwinit_work(struct work_struct *work);
85static void qlcnic_fw_poll_work(struct work_struct *work);
86static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
87 work_func_t func, int delay);
88static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
89static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 90static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
91#ifdef CONFIG_NET_POLL_CONTROLLER
92static void qlcnic_poll_controller(struct net_device *netdev);
93#endif
94
95static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
96static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
97static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
98static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
99
6df900e9 100static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 101static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
102static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
103
7eb9855d 104static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
105static irqreturn_t qlcnic_intr(int irq, void *data);
106static irqreturn_t qlcnic_msi_intr(int irq, void *data);
107static irqreturn_t qlcnic_msix_intr(int irq, void *data);
108
109static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
aec1e845 110static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
111static int qlcnic_start_firmware(struct qlcnic_adapter *);
112
113static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
114static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
9f26f547 115static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
116static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
117static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
118static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
119static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
120 struct qlcnic_esw_func_cfg *);
121/* PCI Device ID Table */
122#define ENTRY(device) \
123 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
124 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
125
126#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
127
6a902881 128static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
129 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
130 {0,}
131};
132
133MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
134
135
136void
137qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
138 struct qlcnic_host_tx_ring *tx_ring)
139{
140 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
141}
142
143static const u32 msi_tgt_status[8] = {
144 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
145 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
146 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
147 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
148};
149
150static const
151struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
152
153static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
154{
155 writel(0, sds_ring->crb_intr_mask);
156}
157
158static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
159{
160 struct qlcnic_adapter *adapter = sds_ring->adapter;
161
162 writel(0x1, sds_ring->crb_intr_mask);
163
164 if (!QLCNIC_IS_MSI_FAMILY(adapter))
165 writel(0xfbff, adapter->tgt_mask_reg);
166}
167
168static int
169qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
170{
171 int size = sizeof(struct qlcnic_host_sds_ring) * count;
172
173 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
174
807540ba 175 return recv_ctx->sds_rings == NULL;
176}
177
178static void
179qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
180{
181 if (recv_ctx->sds_rings != NULL)
182 kfree(recv_ctx->sds_rings);
183
184 recv_ctx->sds_rings = NULL;
185}
186
187static int
188qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
189{
190 int ring;
191 struct qlcnic_host_sds_ring *sds_ring;
192 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
193
194 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
195 return -ENOMEM;
196
197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
198 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 199
200 if (ring == adapter->max_sds_rings - 1)
201 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
202 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
203 else
204 netif_napi_add(netdev, &sds_ring->napi,
205 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
206 }
207
208 return 0;
209}
210
211static void
212qlcnic_napi_del(struct qlcnic_adapter *adapter)
213{
214 int ring;
215 struct qlcnic_host_sds_ring *sds_ring;
216 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
217
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 netif_napi_del(&sds_ring->napi);
221 }
222
223 qlcnic_free_sds_rings(&adapter->recv_ctx);
224}
225
226static void
227qlcnic_napi_enable(struct qlcnic_adapter *adapter)
228{
229 int ring;
230 struct qlcnic_host_sds_ring *sds_ring;
231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
232
233 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
234 return;
235
236 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
237 sds_ring = &recv_ctx->sds_rings[ring];
238 napi_enable(&sds_ring->napi);
239 qlcnic_enable_int(sds_ring);
240 }
241}
242
243static void
244qlcnic_napi_disable(struct qlcnic_adapter *adapter)
245{
246 int ring;
247 struct qlcnic_host_sds_ring *sds_ring;
248 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
249
250 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
251 return;
252
253 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
254 sds_ring = &recv_ctx->sds_rings[ring];
255 qlcnic_disable_int(sds_ring);
256 napi_synchronize(&sds_ring->napi);
257 napi_disable(&sds_ring->napi);
258 }
259}
260
261static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
262{
263 memset(&adapter->stats, 0, sizeof(adapter->stats));
264}
265
266static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
267{
268 u32 val, data;
269
270 val = adapter->ahw.board_type;
271 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
272 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
273 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
274 data = QLCNIC_PORT_MODE_802_3_AP;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
277 data = QLCNIC_PORT_MODE_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
280 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
283 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
284 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
285 } else {
286 data = QLCNIC_PORT_MODE_AUTO_NEG;
287 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
288 }
289
290 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
291 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
292 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
293 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
294 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
295 }
296 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
297 }
298}
299
300static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
301{
302 u32 control;
303 int pos;
304
305 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
306 if (pos) {
307 pci_read_config_dword(pdev, pos, &control);
308 if (enable)
309 control |= PCI_MSIX_FLAGS_ENABLE;
310 else
311 control = 0;
312 pci_write_config_dword(pdev, pos, control);
313 }
314}
315
316static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
317{
318 int i;
319
320 for (i = 0; i < count; i++)
321 adapter->msix_entries[i].entry = i;
322}
323
324static int
325qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
326{
2e9d722d 327 u8 mac_addr[ETH_ALEN];
328 struct net_device *netdev = adapter->netdev;
329 struct pci_dev *pdev = adapter->pdev;
330
da48e6c3 331 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
332 return -EIO;
333
2e9d722d 334 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
335 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
336 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
337
338 /* set station address */
339
340 if (!is_valid_ether_addr(netdev->perm_addr))
341 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
342 netdev->dev_addr);
343
344 return 0;
345}
346
347static int qlcnic_set_mac(struct net_device *netdev, void *p)
348{
349 struct qlcnic_adapter *adapter = netdev_priv(netdev);
350 struct sockaddr *addr = p;
351
352 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
353 return -EOPNOTSUPP;
354
355 if (!is_valid_ether_addr(addr->sa_data))
356 return -EINVAL;
357
8a15ad1f 358 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
359 netif_device_detach(netdev);
360 qlcnic_napi_disable(adapter);
361 }
362
363 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
364 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
365 qlcnic_set_multi(adapter->netdev);
366
8a15ad1f 367 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
368 netif_device_attach(netdev);
369 qlcnic_napi_enable(adapter);
370 }
371 return 0;
372}
373
374static void qlcnic_vlan_rx_register(struct net_device *netdev,
375 struct vlan_group *grp)
376{
377 struct qlcnic_adapter *adapter = netdev_priv(netdev);
378 adapter->vlgrp = grp;
379}
380
381static const struct net_device_ops qlcnic_netdev_ops = {
382 .ndo_open = qlcnic_open,
383 .ndo_stop = qlcnic_close,
384 .ndo_start_xmit = qlcnic_xmit_frame,
385 .ndo_get_stats = qlcnic_get_stats,
386 .ndo_validate_addr = eth_validate_addr,
387 .ndo_set_multicast_list = qlcnic_set_multi,
388 .ndo_set_mac_address = qlcnic_set_mac,
389 .ndo_change_mtu = qlcnic_change_mtu,
390 .ndo_tx_timeout = qlcnic_tx_timeout,
d5790663 391 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
392#ifdef CONFIG_NET_POLL_CONTROLLER
393 .ndo_poll_controller = qlcnic_poll_controller,
394#endif
395};
396
2e9d722d 397static struct qlcnic_nic_template qlcnic_ops = {
398 .config_bridged_mode = qlcnic_config_bridged_mode,
399 .config_led = qlcnic_config_led,
400 .start_firmware = qlcnic_start_firmware
401};
402
403static struct qlcnic_nic_template qlcnic_vf_ops = {
404 .config_bridged_mode = qlcnicvf_config_bridged_mode,
405 .config_led = qlcnicvf_config_led,
9f26f547 406 .start_firmware = qlcnicvf_start_firmware
407};
408
409static void
410qlcnic_setup_intr(struct qlcnic_adapter *adapter)
411{
412 const struct qlcnic_legacy_intr_set *legacy_intrp;
413 struct pci_dev *pdev = adapter->pdev;
414 int err, num_msix;
415
416 if (adapter->rss_supported) {
417 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
418 MSIX_ENTRIES_PER_ADAPTER : 2;
419 } else
420 num_msix = 1;
421
422 adapter->max_sds_rings = 1;
423
424 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
425
426 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
427
428 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
429 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
430 legacy_intrp->tgt_status_reg);
431 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
432 legacy_intrp->tgt_mask_reg);
433 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
434
435 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
436 ISR_INT_STATE_REG);
437
438 qlcnic_set_msix_bit(pdev, 0);
439
440 if (adapter->msix_supported) {
441
442 qlcnic_init_msix_entries(adapter, num_msix);
443 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
444 if (err == 0) {
445 adapter->flags |= QLCNIC_MSIX_ENABLED;
446 qlcnic_set_msix_bit(pdev, 1);
447
448 if (adapter->rss_supported)
449 adapter->max_sds_rings = num_msix;
450
451 dev_info(&pdev->dev, "using msi-x interrupts\n");
452 return;
453 }
454
455 if (err > 0)
456 pci_disable_msix(pdev);
457
458 /* fall through for msi */
459 }
460
461 if (use_msi && !pci_enable_msi(pdev)) {
462 adapter->flags |= QLCNIC_MSI_ENABLED;
463 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
464 msi_tgt_status[adapter->ahw.pci_func]);
465 dev_info(&pdev->dev, "using msi interrupts\n");
466 adapter->msix_entries[0].vector = pdev->irq;
467 return;
468 }
469
470 dev_info(&pdev->dev, "using legacy interrupts\n");
471 adapter->msix_entries[0].vector = pdev->irq;
472}
473
474static void
475qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
476{
477 if (adapter->flags & QLCNIC_MSIX_ENABLED)
478 pci_disable_msix(adapter->pdev);
479 if (adapter->flags & QLCNIC_MSI_ENABLED)
480 pci_disable_msi(adapter->pdev);
481}
482
483static void
484qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
485{
486 if (adapter->ahw.pci_base0 != NULL)
487 iounmap(adapter->ahw.pci_base0);
488}
489
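/* Build the per-function NPAR table and eswitch state from the PCI info reported by firmware. */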
490static int
491qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
492{
e88db3bd 493 struct qlcnic_pci_info *pci_info;
900853a4 494 int i, ret = 0;
495 u8 pfn;
496
497 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
498 if (!pci_info)
499 return -ENOMEM;
500
ca315ac2 501 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 502 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 503 if (!adapter->npars) {
900853a4 504 ret = -ENOMEM;
505 goto err_pci_info;
506 }
346fe763 507
ca315ac2 508 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
509 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
510 if (!adapter->eswitch) {
900853a4 511 ret = -ENOMEM;
ca315ac2 512 goto err_npars;
513 }
514
515 ret = qlcnic_get_pci_info(adapter, pci_info);
516 if (ret)
517 goto err_eswitch;
346fe763 518
519 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
520 pfn = pci_info[i].id;
 521 if (pfn >= QLCNIC_MAX_PCI_FUNC)
522 return QL_STATUS_INVALID_PARAM;
523 adapter->npars[pfn].active = pci_info[i].active;
524 adapter->npars[pfn].type = pci_info[i].type;
525 adapter->npars[pfn].phy_port = pci_info[i].default_port;
526 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
527 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
528 }
529
530 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
531 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
532
e88db3bd 533 kfree(pci_info);
534 return 0;
535
536err_eswitch:
537 kfree(adapter->eswitch);
538 adapter->eswitch = NULL;
ca315ac2 539err_npars:
346fe763 540 kfree(adapter->npars);
ca315ac2 541 adapter->npars = NULL;
542err_pci_info:
543 kfree(pci_info);
544
545 return ret;
546}
547
548static int
549qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
550{
551 u8 id;
552 u32 ref_count;
553 int i, ret = 1;
554 u32 data = QLCNIC_MGMT_FUNC;
555 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
556
557 /* If other drivers are not in use set their privilege level */
31018e06 558 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
559 ret = qlcnic_api_lock(adapter);
560 if (ret)
561 goto err_lock;
2e9d722d 562
563 if (qlcnic_config_npars) {
564 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 565 id = i;
566 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
567 id == adapter->ahw.pci_func)
568 continue;
569 data |= (qlcnic_config_npars &
570 QLC_DEV_SET_DRV(0xf, id));
571 }
572 } else {
573 data = readl(priv_op);
574 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
575 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
576 adapter->ahw.pci_func));
577 }
578 writel(data, priv_op);
579 qlcnic_api_unlock(adapter);
580err_lock:
581 return ret;
582}
583
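/* Read the firmware HAL version, PCI function number and privilege level, and fall back to the VF ops for non-privileged functions. */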
584static void
585qlcnic_check_vf(struct qlcnic_adapter *adapter)
586{
587 void __iomem *msix_base_addr;
588 void __iomem *priv_op;
589 u32 func;
590 u32 msix_base;
591 u32 op_mode, priv_level;
592
593 /* Determine FW API version */
594 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
595
596 /* Find PCI function number */
597 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
598 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
599 msix_base = readl(msix_base_addr);
600 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
601 adapter->ahw.pci_func = func;
602
603 /* Determine function privilege level */
604 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
605 op_mode = readl(priv_op);
0e33c664 606 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 607 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 608 else
609 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
610
0866d96d 611 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
612 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
613 dev_info(&adapter->pdev->dev,
614 "HAL Version: %d Non Privileged function\n",
615 adapter->fw_hal_version);
616 adapter->nic_ops = &qlcnic_vf_ops;
617 } else
618 adapter->nic_ops = &qlcnic_ops;
619}
620
621static int
622qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
623{
624 void __iomem *mem_ptr0 = NULL;
625 resource_size_t mem_base;
626 unsigned long mem_len, pci_len0 = 0;
627
628 struct pci_dev *pdev = adapter->pdev;
af19b491 629
630 /* remap phys address */
631 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
632 mem_len = pci_resource_len(pdev, 0);
633
634 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
635
636 mem_ptr0 = pci_ioremap_bar(pdev, 0);
637 if (mem_ptr0 == NULL) {
638 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
639 return -EIO;
640 }
641 pci_len0 = mem_len;
642 } else {
643 return -EIO;
644 }
645
646 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
647
648 adapter->ahw.pci_base0 = mem_ptr0;
649 adapter->ahw.pci_len0 = pci_len0;
650
0866d96d 651 qlcnic_check_vf(adapter);
2e9d722d 652
af19b491 653 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
2e9d722d 654 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
655
656 return 0;
657}
658
659static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
660{
661 struct pci_dev *pdev = adapter->pdev;
662 int i, found = 0;
663
664 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
665 if (qlcnic_boards[i].vendor == pdev->vendor &&
666 qlcnic_boards[i].device == pdev->device &&
667 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
668 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
 669 sprintf(name, "%pM: %s",
670 adapter->mac_addr,
671 qlcnic_boards[i].short_name);
672 found = 1;
673 break;
674 }
675
676 }
677
678 if (!found)
7f9a0c34 679 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
680}
681
682static void
683qlcnic_check_options(struct qlcnic_adapter *adapter)
684{
685 u32 fw_major, fw_minor, fw_build;
af19b491 686 struct pci_dev *pdev = adapter->pdev;
687
688 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
689 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
690 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
691
692 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
693
694 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
695 fw_major, fw_minor, fw_build);
af19b491 696
697 adapter->flags &= ~QLCNIC_LRO_ENABLED;
698
699 if (adapter->ahw.port_type == QLCNIC_XGBE) {
700 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
701 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
702 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
703 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
704 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
705 }
706
707 adapter->msix_supported = !!use_msi_x;
708 adapter->rss_supported = !!use_msi_x;
709
710 adapter->num_txd = MAX_CMD_DESCRIPTORS;
711
251b036a 712 adapter->max_rds_rings = MAX_RDS_RINGS;
713}
714
715static int
716qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
717{
718 int err;
719 struct qlcnic_info nic_info;
720
721 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
722 if (err)
723 return err;
724
725 adapter->physical_port = nic_info.phys_port;
726 adapter->switch_mode = nic_info.switch_mode;
727 adapter->max_tx_ques = nic_info.max_tx_ques;
728 adapter->max_rx_ques = nic_info.max_rx_ques;
729 adapter->capabilities = nic_info.capabilities;
730 adapter->max_mac_filters = nic_info.max_mac_filters;
731 adapter->max_mtu = nic_info.max_mtu;
732
733 if (adapter->capabilities & BIT_6)
734 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
735 else
736 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
737
738 return err;
739}
740
741static void
742qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
743 struct qlcnic_esw_func_cfg *esw_cfg)
744{
745 if (esw_cfg->discard_tagged)
746 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
747 else
748 adapter->flags |= QLCNIC_TAGGING_ENABLED;
749
750 if (esw_cfg->vlan_id)
751 adapter->pvid = esw_cfg->vlan_id;
752 else
753 adapter->pvid = 0;
754}
755
756static void
757qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
758 struct qlcnic_esw_func_cfg *esw_cfg)
759{
fe4d434d 760 adapter->flags &= ~QLCNIC_MACSPOOF;
7373373d 761 adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;
762
763 if (esw_cfg->mac_anti_spoof)
764 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 765
766 if (!esw_cfg->mac_override)
767 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
768
769 qlcnic_set_netdev_features(adapter, esw_cfg);
770}
771
772static int
773qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
774{
775 struct qlcnic_esw_func_cfg esw_cfg;
776
777 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
778 return 0;
779
780 esw_cfg.pci_func = adapter->ahw.pci_func;
781 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
782 return -EIO;
8cf61f89 783 qlcnic_set_vlan_config(adapter, &esw_cfg);
784 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
785
786 return 0;
787}
788
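/* Apply the eswitch offload flags to the netdev feature bits (checksum, TSO, LRO). */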
789static void
790qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
791 struct qlcnic_esw_func_cfg *esw_cfg)
792{
793 struct net_device *netdev = adapter->netdev;
794 unsigned long features, vlan_features;
795
796 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
797 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
798 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
799 NETIF_F_IPV6_CSUM);
800
801 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
802 features |= (NETIF_F_TSO | NETIF_F_TSO6);
803 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
804 }
805 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
806 features |= NETIF_F_LRO;
807
808 if (esw_cfg->offload_flags & BIT_0) {
809 netdev->features |= features;
810 adapter->rx_csum = 1;
811 if (!(esw_cfg->offload_flags & BIT_1))
812 netdev->features &= ~NETIF_F_TSO;
813 if (!(esw_cfg->offload_flags & BIT_2))
814 netdev->features &= ~NETIF_F_TSO6;
815 } else {
816 netdev->features &= ~features;
817 adapter->rx_csum = 0;
818 }
819
820 netdev->vlan_features = (features & vlan_features);
821}
822
823static int
824qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
825{
826 void __iomem *priv_op;
827 u32 op_mode, priv_level;
828 int err = 0;
829
830 err = qlcnic_initialize_nic(adapter);
831 if (err)
832 return err;
833
834 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
835 return 0;
836
837 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
838 op_mode = readl(priv_op);
839 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
840
841 if (op_mode == QLC_DEV_DRV_DEFAULT)
842 priv_level = QLCNIC_MGMT_FUNC;
843 else
844 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
845
174240a8 846 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
847 if (priv_level == QLCNIC_MGMT_FUNC) {
848 adapter->op_mode = QLCNIC_MGMT_FUNC;
849 err = qlcnic_init_pci_info(adapter);
850 if (err)
851 return err;
852 /* Set privilege level for other functions */
853 qlcnic_set_function_modes(adapter);
854 dev_info(&adapter->pdev->dev,
855 "HAL Version: %d, Management function\n",
856 adapter->fw_hal_version);
857 } else if (priv_level == QLCNIC_PRIV_FUNC) {
858 adapter->op_mode = QLCNIC_PRIV_FUNC;
859 dev_info(&adapter->pdev->dev,
860 "HAL Version: %d, Privileged function\n",
861 adapter->fw_hal_version);
862 }
174240a8 863 }
864
865 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
866
867 return err;
868}
869
870static int
871qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
872{
873 struct qlcnic_esw_func_cfg esw_cfg;
874 struct qlcnic_npar_info *npar;
875 u8 i;
876
174240a8 877 if (adapter->need_fw_reset)
878 return 0;
879
880 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
881 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
882 continue;
883 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
884 esw_cfg.pci_func = i;
885 esw_cfg.offload_flags = BIT_0;
7373373d 886 esw_cfg.mac_override = BIT_0;
887 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
888 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
889 if (qlcnic_config_switch_port(adapter, &esw_cfg))
890 return -EIO;
891 npar = &adapter->npars[i];
892 npar->pvid = esw_cfg.vlan_id;
7373373d 893 npar->mac_override = esw_cfg.mac_override;
894 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
895 npar->discard_tagged = esw_cfg.discard_tagged;
896 npar->promisc_mode = esw_cfg.promisc_mode;
897 npar->offload_flags = esw_cfg.offload_flags;
898 }
899
900 return 0;
901}
902
903static int
904qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
905 struct qlcnic_npar_info *npar, int pci_func)
906{
907 struct qlcnic_esw_func_cfg esw_cfg;
908 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
909 esw_cfg.pci_func = pci_func;
910 esw_cfg.vlan_id = npar->pvid;
7373373d 911 esw_cfg.mac_override = npar->mac_override;
912 esw_cfg.discard_tagged = npar->discard_tagged;
913 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
914 esw_cfg.offload_flags = npar->offload_flags;
915 esw_cfg.promisc_mode = npar->promisc_mode;
916 if (qlcnic_config_switch_port(adapter, &esw_cfg))
917 return -EIO;
918
919 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
920 if (qlcnic_config_switch_port(adapter, &esw_cfg))
921 return -EIO;
922
923 return 0;
924}
925
926static int
927qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
928{
4e8acb01 929 int i, err;
930 struct qlcnic_npar_info *npar;
931 struct qlcnic_info nic_info;
932
174240a8 933 if (!adapter->need_fw_reset)
934 return 0;
935
936 /* Set the NPAR config data after FW reset */
937 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
938 npar = &adapter->npars[i];
939 if (npar->type != QLCNIC_TYPE_NIC)
940 continue;
941 err = qlcnic_get_nic_info(adapter, &nic_info, i);
942 if (err)
943 return err;
944 nic_info.min_tx_bw = npar->min_bw;
945 nic_info.max_tx_bw = npar->max_bw;
946 err = qlcnic_set_nic_info(adapter, &nic_info);
947 if (err)
948 return err;
cea8975e 949
950 if (npar->enable_pm) {
951 err = qlcnic_config_port_mirroring(adapter,
952 npar->dest_npar, 1, i);
953 if (err)
954 return err;
cea8975e 955 }
956 err = qlcnic_reset_eswitch_config(adapter, npar, i);
957 if (err)
958 return err;
cea8975e 959 }
4e8acb01 960 return 0;
961}
962
963static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
964{
965 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
966 u32 npar_state;
967
968 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
969 return 0;
970
971 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
972 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
973 msleep(1000);
974 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
975 }
976 if (!npar_opt_timeo) {
977 dev_err(&adapter->pdev->dev,
978 "Waiting for NPAR state to opertional timeout\n");
979 return -EIO;
980 }
981 return 0;
982}
983
984static int
985qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
986{
987 int err;
988
989 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
990 adapter->op_mode != QLCNIC_MGMT_FUNC)
991 return 0;
992
993 err = qlcnic_set_default_offload_settings(adapter);
994 if (err)
995 return err;
996
997 err = qlcnic_reset_npar_config(adapter);
998 if (err)
999 return err;
1000
1001 qlcnic_dev_set_npar_ready(adapter);
1002
1003 return err;
1004}
1005
1006static int
1007qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1008{
d4066833 1009 int err;
af19b491 1010
1011 err = qlcnic_can_start_firmware(adapter);
1012 if (err < 0)
1013 return err;
1014 else if (!err)
d4066833 1015 goto check_fw_status;
af19b491 1016
1017 if (load_fw_file)
1018 qlcnic_request_firmware(adapter);
8f891387 1019 else {
1020 err = qlcnic_check_flash_fw_ver(adapter);
1021 if (err)
8f891387 1022 goto err_out;
1023
4d5bdb38 1024 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1025 }
1026
1027 err = qlcnic_need_fw_reset(adapter);
af19b491 1028 if (err == 0)
4e70812b 1029 goto check_fw_status;
af19b491 1030
1031 err = qlcnic_pinit_from_rom(adapter);
1032 if (err)
1033 goto err_out;
1034 qlcnic_set_port_mode(adapter);
1035
1036 err = qlcnic_load_firmware(adapter);
1037 if (err)
1038 goto err_out;
1039
1040 qlcnic_release_firmware(adapter);
d4066833 1041 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1042
1043check_fw_status:
1044 err = qlcnic_check_fw_status(adapter);
1045 if (err)
1046 goto err_out;
1047
1048 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1049 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1050
1051 err = qlcnic_check_eswitch_mode(adapter);
1052 if (err) {
1053 dev_err(&adapter->pdev->dev,
1054 "Memory allocation failed for eswitch\n");
1055 goto err_out;
1056 }
1057 err = qlcnic_set_mgmt_operations(adapter);
1058 if (err)
1059 goto err_out;
1060
1061 qlcnic_check_options(adapter);
1062 adapter->need_fw_reset = 0;
1063
1064 qlcnic_release_firmware(adapter);
1065 return 0;
1066
1067err_out:
1068 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1069 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1070
1071 qlcnic_release_firmware(adapter);
1072 return err;
1073}
1074
1075static int
1076qlcnic_request_irq(struct qlcnic_adapter *adapter)
1077{
1078 irq_handler_t handler;
1079 struct qlcnic_host_sds_ring *sds_ring;
1080 int err, ring;
1081
1082 unsigned long flags = 0;
1083 struct net_device *netdev = adapter->netdev;
1084 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1085
1086 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1087 handler = qlcnic_tmp_intr;
1088 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1089 flags |= IRQF_SHARED;
1090
1091 } else {
1092 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1093 handler = qlcnic_msix_intr;
1094 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1095 handler = qlcnic_msi_intr;
1096 else {
1097 flags |= IRQF_SHARED;
1098 handler = qlcnic_intr;
1099 }
1100 }
1101 adapter->irq = netdev->irq;
1102
1103 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1104 sds_ring = &recv_ctx->sds_rings[ring];
1105 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1106 err = request_irq(sds_ring->irq, handler,
1107 flags, sds_ring->name, sds_ring);
1108 if (err)
1109 return err;
1110 }
1111
1112 return 0;
1113}
1114
1115static void
1116qlcnic_free_irq(struct qlcnic_adapter *adapter)
1117{
1118 int ring;
1119 struct qlcnic_host_sds_ring *sds_ring;
1120
1121 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1122
1123 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1124 sds_ring = &recv_ctx->sds_rings[ring];
1125 free_irq(sds_ring->irq, sds_ring);
1126 }
1127}
1128
1129static void
1130qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1131{
1132 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1133 adapter->coal.normal.data.rx_time_us =
1134 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1135 adapter->coal.normal.data.rx_packets =
1136 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1137 adapter->coal.normal.data.tx_time_us =
1138 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1139 adapter->coal.normal.data.tx_packets =
1140 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1141}
1142
1143static int
1144__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1145{
1146 int ring;
1147 struct qlcnic_host_rds_ring *rds_ring;
1148
1149 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1150 return -EIO;
1151
1152 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1153 return 0;
1154 if (qlcnic_set_eswitch_port_config(adapter))
1155 return -EIO;
1156
1157 if (qlcnic_fw_create_ctx(adapter))
1158 return -EIO;
1159
1160 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1161 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1162 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1163 }
1164
1165 qlcnic_set_multi(netdev);
1166 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1167
1168 adapter->ahw.linkup = 0;
1169
1170 if (adapter->max_sds_rings > 1)
1171 qlcnic_config_rss(adapter, 1);
1172
1173 qlcnic_config_intr_coalesce(adapter);
1174
24763d80 1175 if (netdev->features & NETIF_F_LRO)
1176 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1177
1178 qlcnic_napi_enable(adapter);
1179
1180 qlcnic_linkevent_request(adapter, 1);
1181
68bf1c68 1182 adapter->reset_context = 0;
1183 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1184 return 0;
1185}
1186
 1187/* Usage: During resume and firmware recovery module. */
1188
1189static int
1190qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1191{
1192 int err = 0;
1193
1194 rtnl_lock();
1195 if (netif_running(netdev))
1196 err = __qlcnic_up(adapter, netdev);
1197 rtnl_unlock();
1198
1199 return err;
1200}
1201
1202static void
1203__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1204{
1205 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1206 return;
1207
1208 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1209 return;
1210
1211 smp_mb();
1212 spin_lock(&adapter->tx_clean_lock);
1213 netif_carrier_off(netdev);
1214 netif_tx_disable(netdev);
1215
1216 qlcnic_free_mac_list(adapter);
1217
1218 if (adapter->fhash.fnum)
1219 qlcnic_delete_lb_filters(adapter);
1220
1221 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1222
1223 qlcnic_napi_disable(adapter);
1224
1225 qlcnic_fw_destroy_ctx(adapter);
1226
1227 qlcnic_reset_rx_buffers_list(adapter);
1228 qlcnic_release_tx_buffers(adapter);
1229 spin_unlock(&adapter->tx_clean_lock);
1230}
1231
1232/* Usage: During suspend and firmware recovery module */
1233
1234static void
1235qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1236{
1237 rtnl_lock();
1238 if (netif_running(netdev))
1239 __qlcnic_down(adapter, netdev);
1240 rtnl_unlock();
1241
1242}
1243
1244static int
1245qlcnic_attach(struct qlcnic_adapter *adapter)
1246{
1247 struct net_device *netdev = adapter->netdev;
1248 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1249 int err;
1250
1251 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1252 return 0;
1253
1254 err = qlcnic_napi_add(adapter, netdev);
1255 if (err)
1256 return err;
1257
1258 err = qlcnic_alloc_sw_resources(adapter);
1259 if (err) {
1260 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1261 goto err_out_napi_del;
1262 }
1263
1264 err = qlcnic_alloc_hw_resources(adapter);
1265 if (err) {
1266 dev_err(&pdev->dev, "Error in setting hw resources\n");
1267 goto err_out_free_sw;
1268 }
1269
1270 err = qlcnic_request_irq(adapter);
1271 if (err) {
1272 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1273 goto err_out_free_hw;
1274 }
1275
1276 qlcnic_init_coalesce_defaults(adapter);
1277
1278 qlcnic_create_sysfs_entries(adapter);
1279
1280 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1281 return 0;
1282
8a15ad1f 1283err_out_free_hw:
1284 qlcnic_free_hw_resources(adapter);
1285err_out_free_sw:
1286 qlcnic_free_sw_resources(adapter);
1287err_out_napi_del:
1288 qlcnic_napi_del(adapter);
1289 return err;
1290}
1291
1292static void
1293qlcnic_detach(struct qlcnic_adapter *adapter)
1294{
1295 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1296 return;
1297
1298 qlcnic_remove_sysfs_entries(adapter);
1299
1300 qlcnic_free_hw_resources(adapter);
1301 qlcnic_release_rx_buffers(adapter);
1302 qlcnic_free_irq(adapter);
1303 qlcnic_napi_del(adapter);
1304 qlcnic_free_sw_resources(adapter);
1305
1306 adapter->is_up = 0;
1307}
1308
1309void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1310{
1311 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1312 struct qlcnic_host_sds_ring *sds_ring;
1313 int ring;
1314
78ad3892 1315 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1316 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1317 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1318 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1319 qlcnic_disable_int(sds_ring);
1320 }
1321 }
1322
1323 qlcnic_fw_destroy_ctx(adapter);
1324
1325 qlcnic_detach(adapter);
1326
1327 adapter->diag_test = 0;
1328 adapter->max_sds_rings = max_sds_rings;
1329
1330 if (qlcnic_attach(adapter))
34ce3626 1331 goto out;
1332
1333 if (netif_running(netdev))
1334 __qlcnic_up(adapter, netdev);
34ce3626 1335out:
1336 netif_device_attach(netdev);
1337}
1338
1339int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1340{
1341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1342 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1343 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1344 int ring;
1345 int ret;
1346
1347 netif_device_detach(netdev);
1348
1349 if (netif_running(netdev))
1350 __qlcnic_down(adapter, netdev);
1351
1352 qlcnic_detach(adapter);
1353
1354 adapter->max_sds_rings = 1;
1355 adapter->diag_test = test;
1356
1357 ret = qlcnic_attach(adapter);
1358 if (ret) {
1359 netif_device_attach(netdev);
7eb9855d 1360 return ret;
34ce3626 1361 }
7eb9855d 1362
1363 ret = qlcnic_fw_create_ctx(adapter);
1364 if (ret) {
1365 qlcnic_detach(adapter);
57e46248 1366 netif_device_attach(netdev);
1367 return ret;
1368 }
1369
1370 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1371 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1372 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1373 }
1374
1375 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1376 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1377 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1378 qlcnic_enable_int(sds_ring);
1379 }
7eb9855d 1380 }
78ad3892 1381 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1382
1383 return 0;
1384}
1385
1386/* Reset context in hardware only */
1387static int
1388qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1389{
1390 struct net_device *netdev = adapter->netdev;
1391
1392 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1393 return -EBUSY;
1394
1395 netif_device_detach(netdev);
1396
1397 qlcnic_down(adapter, netdev);
1398
1399 qlcnic_up(adapter, netdev);
1400
1401 netif_device_attach(netdev);
1402
1403 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1404 return 0;
1405}
1406
1407int
1408qlcnic_reset_context(struct qlcnic_adapter *adapter)
1409{
1410 int err = 0;
1411 struct net_device *netdev = adapter->netdev;
1412
1413 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1414 return -EBUSY;
1415
1416 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1417
1418 netif_device_detach(netdev);
1419
1420 if (netif_running(netdev))
1421 __qlcnic_down(adapter, netdev);
1422
1423 qlcnic_detach(adapter);
1424
1425 if (netif_running(netdev)) {
1426 err = qlcnic_attach(adapter);
1427 if (!err)
34ce3626 1428 __qlcnic_up(adapter, netdev);
1429 }
1430
1431 netif_device_attach(netdev);
1432 }
1433
1434 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1435 return err;
1436}
1437
1438static int
1439qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1440 struct net_device *netdev, u8 pci_using_dac)
1441{
1442 int err;
1443 struct pci_dev *pdev = adapter->pdev;
1444
1445 adapter->rx_csum = 1;
1446 adapter->mc_enabled = 0;
1447 adapter->max_mc_count = 38;
1448
1449 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1450 netdev->watchdog_timeo = 5*HZ;
1451
1452 qlcnic_change_mtu(netdev, netdev->mtu);
1453
1454 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1455
2e9d722d 1456 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
d5790663 1457 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
2e9d722d 1458 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1459 NETIF_F_IPV6_CSUM);
1460
1461 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1462 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1463 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1464 }
af19b491 1465
1bb09fb9 1466 if (pci_using_dac) {
1467 netdev->features |= NETIF_F_HIGHDMA;
1468 netdev->vlan_features |= NETIF_F_HIGHDMA;
1469 }
1470
1471 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1472 netdev->features |= (NETIF_F_HW_VLAN_TX);
1473
1474 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1475 netdev->features |= NETIF_F_LRO;
1476 netdev->irq = adapter->msix_entries[0].vector;
1477
1478 netif_carrier_off(netdev);
1479 netif_stop_queue(netdev);
1480
1481 err = register_netdev(netdev);
1482 if (err) {
1483 dev_err(&pdev->dev, "failed to register net device\n");
1484 return err;
1485 }
1486
1487 return 0;
1488}
1489
1490static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1491{
1492 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1493 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1494 *pci_using_dac = 1;
1495 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1496 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1497 *pci_using_dac = 0;
1498 else {
1499 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1500 return -EIO;
1501 }
1502
1503 return 0;
1504}
1505
1506static int __devinit
1507qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1508{
1509 struct net_device *netdev = NULL;
1510 struct qlcnic_adapter *adapter = NULL;
1511 int err;
af19b491 1512 uint8_t revision_id;
1bb09fb9 1513 uint8_t pci_using_dac;
da48e6c3 1514 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1515
1516 err = pci_enable_device(pdev);
1517 if (err)
1518 return err;
1519
1520 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1521 err = -ENODEV;
1522 goto err_out_disable_pdev;
1523 }
1524
1525 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1526 if (err)
1527 goto err_out_disable_pdev;
1528
1529 err = pci_request_regions(pdev, qlcnic_driver_name);
1530 if (err)
1531 goto err_out_disable_pdev;
1532
1533 pci_set_master(pdev);
451724c8 1534 pci_enable_pcie_error_reporting(pdev);
1535
1536 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1537 if (!netdev) {
1538 dev_err(&pdev->dev, "failed to allocate net_device\n");
1539 err = -ENOMEM;
1540 goto err_out_free_res;
1541 }
1542
1543 SET_NETDEV_DEV(netdev, &pdev->dev);
1544
1545 adapter = netdev_priv(netdev);
1546 adapter->netdev = netdev;
1547 adapter->pdev = pdev;
6df900e9 1548 adapter->dev_rst_time = jiffies;
1549
1550 revision_id = pdev->revision;
1551 adapter->ahw.revision_id = revision_id;
1552
1553 rwlock_init(&adapter->ahw.crb_lock);
1554 mutex_init(&adapter->ahw.mem_lock);
1555
1556 spin_lock_init(&adapter->tx_clean_lock);
1557 INIT_LIST_HEAD(&adapter->mac_list);
1558
1559 err = qlcnic_setup_pci_map(adapter);
1560 if (err)
1561 goto err_out_free_netdev;
1562
1563 /* This will be reset for mezz cards */
2e9d722d 1564 adapter->portnum = adapter->ahw.pci_func;
1565
1566 err = qlcnic_get_board_info(adapter);
1567 if (err) {
1568 dev_err(&pdev->dev, "Error getting board config info.\n");
1569 goto err_out_iounmap;
1570 }
1571
1572 err = qlcnic_setup_idc_param(adapter);
1573 if (err)
b3a24649 1574 goto err_out_iounmap;
af19b491 1575
9f26f547 1576 err = adapter->nic_ops->start_firmware(adapter);
1577 if (err) {
 1578 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
af19b491 1579 goto err_out_decr_ref;
a7fc948f 1580 }
af19b491 1581
1582 if (qlcnic_read_mac_addr(adapter))
1583 dev_warn(&pdev->dev, "failed to read mac addr\n");
1584
1585 if (adapter->portnum == 0) {
1586 get_brd_name(adapter, brd_name);
1587
1588 pr_info("%s: %s Board Chip rev 0x%x\n",
1589 module_name(THIS_MODULE),
1590 brd_name, adapter->ahw.revision_id);
1591 }
1592
1593 qlcnic_clear_stats(adapter);
1594
1595 qlcnic_setup_intr(adapter);
1596
1bb09fb9 1597 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1598 if (err)
1599 goto err_out_disable_msi;
1600
1601 pci_set_drvdata(pdev, adapter);
1602
1603 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1604
1605 switch (adapter->ahw.port_type) {
1606 case QLCNIC_GBE:
1607 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1608 adapter->netdev->name);
1609 break;
1610 case QLCNIC_XGBE:
1611 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1612 adapter->netdev->name);
1613 break;
1614 }
1615
b5e5492c 1616 qlcnic_alloc_lb_filters_mem(adapter);
1617 qlcnic_create_diag_entries(adapter);
1618
1619 return 0;
1620
1621err_out_disable_msi:
1622 qlcnic_teardown_intr(adapter);
1623
1624err_out_decr_ref:
21854f02 1625 qlcnic_clr_all_drv_state(adapter, 0);
1626
1627err_out_iounmap:
1628 qlcnic_cleanup_pci_map(adapter);
1629
1630err_out_free_netdev:
1631 free_netdev(netdev);
1632
1633err_out_free_res:
1634 pci_release_regions(pdev);
1635
1636err_out_disable_pdev:
1637 pci_set_drvdata(pdev, NULL);
1638 pci_disable_device(pdev);
1639 return err;
1640}
1641
1642static void __devexit qlcnic_remove(struct pci_dev *pdev)
1643{
1644 struct qlcnic_adapter *adapter;
1645 struct net_device *netdev;
1646
1647 adapter = pci_get_drvdata(pdev);
1648 if (adapter == NULL)
1649 return;
1650
1651 netdev = adapter->netdev;
1652
1653 qlcnic_cancel_fw_work(adapter);
1654
1655 unregister_netdev(netdev);
1656
1657 qlcnic_detach(adapter);
1658
1659 if (adapter->npars != NULL)
1660 kfree(adapter->npars);
1661 if (adapter->eswitch != NULL)
1662 kfree(adapter->eswitch);
1663
21854f02 1664 qlcnic_clr_all_drv_state(adapter, 0);
1665
1666 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1667
1668 qlcnic_free_lb_filters_mem(adapter);
1669
1670 qlcnic_teardown_intr(adapter);
1671
1672 qlcnic_remove_diag_entries(adapter);
1673
1674 qlcnic_cleanup_pci_map(adapter);
1675
1676 qlcnic_release_firmware(adapter);
1677
451724c8 1678 pci_disable_pcie_error_reporting(pdev);
1679 pci_release_regions(pdev);
1680 pci_disable_device(pdev);
1681 pci_set_drvdata(pdev, NULL);
1682
1683 free_netdev(netdev);
1684}
1685static int __qlcnic_shutdown(struct pci_dev *pdev)
1686{
1687 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1688 struct net_device *netdev = adapter->netdev;
1689 int retval;
1690
1691 netif_device_detach(netdev);
1692
1693 qlcnic_cancel_fw_work(adapter);
1694
1695 if (netif_running(netdev))
1696 qlcnic_down(adapter, netdev);
1697
21854f02 1698 qlcnic_clr_all_drv_state(adapter, 0);
1699
1700 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1701
1702 retval = pci_save_state(pdev);
1703 if (retval)
1704 return retval;
1705
1706 if (qlcnic_wol_supported(adapter)) {
1707 pci_enable_wake(pdev, PCI_D3cold, 1);
1708 pci_enable_wake(pdev, PCI_D3hot, 1);
1709 }
1710
1711 return 0;
1712}
1713
1714static void qlcnic_shutdown(struct pci_dev *pdev)
1715{
1716 if (__qlcnic_shutdown(pdev))
1717 return;
1718
1719 pci_disable_device(pdev);
1720}
1721
1722#ifdef CONFIG_PM
1723static int
1724qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1725{
1726 int retval;
1727
1728 retval = __qlcnic_shutdown(pdev);
1729 if (retval)
1730 return retval;
1731
1732 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1733 return 0;
1734}
1735
1736static int
1737qlcnic_resume(struct pci_dev *pdev)
1738{
1739 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1740 struct net_device *netdev = adapter->netdev;
1741 int err;
1742
1743 err = pci_enable_device(pdev);
1744 if (err)
1745 return err;
1746
1747 pci_set_power_state(pdev, PCI_D0);
1748 pci_set_master(pdev);
1749 pci_restore_state(pdev);
1750
9f26f547 1751 err = adapter->nic_ops->start_firmware(adapter);
1752 if (err) {
1753 dev_err(&pdev->dev, "failed to start firmware\n");
1754 return err;
1755 }
1756
1757 if (netif_running(netdev)) {
1758 err = qlcnic_up(adapter, netdev);
1759 if (err)
52486a3a 1760 goto done;
af19b491 1761
aec1e845 1762 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1763 }
52486a3a 1764done:
1765 netif_device_attach(netdev);
1766 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1767 return 0;
1768}
1769#endif
1770
1771static int qlcnic_open(struct net_device *netdev)
1772{
1773 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1774 int err;
1775
1776 err = qlcnic_attach(adapter);
1777 if (err)
1778 return err;
1779
1780 err = __qlcnic_up(adapter, netdev);
1781 if (err)
1782 goto err_out;
1783
1784 netif_start_queue(netdev);
1785
1786 return 0;
1787
1788err_out:
1789 qlcnic_detach(adapter);
1790 return err;
1791}
1792
1793/*
1794 * qlcnic_close - Disables a network interface entry point
1795 */
1796static int qlcnic_close(struct net_device *netdev)
1797{
1798 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1799
1800 __qlcnic_down(adapter, netdev);
1801 return 0;
1802}
1803
1804static void
1805qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1806{
1807 void *head;
1808 int i;
1809
1810 if (!qlcnic_mac_learn)
1811 return;
1812
1813 spin_lock_init(&adapter->mac_learn_lock);
1814
1815 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1816 GFP_KERNEL);
1817 if (!head)
1818 return;
1819
1820 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1821 adapter->fhash.fhead = (struct hlist_head *)head;
1822
1823 for (i = 0; i < adapter->fhash.fmax; i++)
1824 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1825}
1826
1827static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1828{
1829 if (adapter->fhash.fmax && adapter->fhash.fhead)
1830 kfree(adapter->fhash.fhead);
1831
1832 adapter->fhash.fhead = NULL;
1833 adapter->fhash.fmax = 0;
1834}
1835
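/* Post a MAC-learn request for the given source address (and VLAN) to the firmware through a Tx ring descriptor. */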
1836static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
03c5d770 1837 u64 uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1838{
1839 struct cmd_desc_type0 *hwdesc;
1840 struct qlcnic_nic_req *req;
1841 struct qlcnic_mac_req *mac_req;
1842 u32 producer;
1843 u64 word;
1844
1845 producer = tx_ring->producer;
1846 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1847
1848 req = (struct qlcnic_nic_req *)hwdesc;
1849 memset(req, 0, sizeof(struct qlcnic_nic_req));
1850 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1851
1852 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1853 req->req_hdr = cpu_to_le64(word);
1854
1855 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1856 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1857 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1858
1859 req->words[1] = cpu_to_le64(vlan_id);
1860
1861 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1862}
1863
1864#define QLCNIC_MAC_HASH(MAC)\
1865 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1866
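/* Learn the source MAC of an outgoing frame: if it is not already in the driver's filter hash, add it and notify the firmware via qlcnic_change_filter(). */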
1867static void
1868qlcnic_send_filter(struct qlcnic_adapter *adapter,
1869 struct qlcnic_host_tx_ring *tx_ring,
1870 struct cmd_desc_type0 *first_desc,
1871 struct sk_buff *skb)
1872{
1873 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1874 struct qlcnic_filter *fil, *tmp_fil;
1875 struct hlist_node *tmp_hnode, *n;
1876 struct hlist_head *head;
1877 u64 src_addr = 0;
03c5d770 1878 u16 vlan_id = 0;
1879 u8 hindex;
1880
1881 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1882 return;
1883
1884 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1885 return;
1886
 1887 /* Only NPAR capable devices support vlan based learning */
1888 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1889 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1890 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1891 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1892 head = &(adapter->fhash.fhead[hindex]);
1893
1894 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1895 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1896 tmp_fil->vlan_id == vlan_id) {
b5e5492c
AKS
1897 tmp_fil->ftime = jiffies;
1898 return;
1899 }
1900 }
1901
1902 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1903 if (!fil)
1904 return;
1905
03c5d770 1906 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1907
1908 fil->ftime = jiffies;
03c5d770 1909 fil->vlan_id = vlan_id;
b5e5492c
AKS
1910 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1911 spin_lock(&adapter->mac_learn_lock);
1912 hlist_add_head(&(fil->fnode), head);
1913 adapter->fhash.fnum++;
1914 spin_unlock(&adapter->mac_learn_lock);
1915}
1916
af19b491
AKS
1917static void
1918qlcnic_tso_check(struct net_device *netdev,
1919 struct qlcnic_host_tx_ring *tx_ring,
1920 struct cmd_desc_type0 *first_desc,
1921 struct sk_buff *skb)
1922{
1923 u8 opcode = TX_ETHER_PKT;
1924 __be16 protocol = skb->protocol;
8cf61f89
AKS
1925 u16 flags = 0;
1926 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1927 struct cmd_desc_type0 *hwdesc;
1928 struct vlan_ethhdr *vh;
8bfe8b91 1929 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1930 u32 producer = tx_ring->producer;
8cf61f89 1931 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1932
2e9d722d
AC
1933 if (*(skb->data) & BIT_0) {
1934 flags |= BIT_0;
1935 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1936 }
1937
af19b491
AKS
1938 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1939 skb_shinfo(skb)->gso_size > 0) {
1940
1941 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1942
1943 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1944 first_desc->total_hdr_length = hdr_len;
1945 if (vlan_oob) {
1946 first_desc->total_hdr_length += VLAN_HLEN;
1947 first_desc->tcp_hdr_offset = VLAN_HLEN;
1948 first_desc->ip_hdr_offset = VLAN_HLEN;
 1949 /* Only in case of TSO on a VLAN device */
1950 flags |= FLAGS_VLAN_TAGGED;
1951 }
1952
1953 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1954 TX_TCP_LSO6 : TX_TCP_LSO;
1955 tso = 1;
1956
1957 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1958 u8 l4proto;
1959
1960 if (protocol == cpu_to_be16(ETH_P_IP)) {
1961 l4proto = ip_hdr(skb)->protocol;
1962
1963 if (l4proto == IPPROTO_TCP)
1964 opcode = TX_TCP_PKT;
1965 else if (l4proto == IPPROTO_UDP)
1966 opcode = TX_UDP_PKT;
1967 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1968 l4proto = ipv6_hdr(skb)->nexthdr;
1969
1970 if (l4proto == IPPROTO_TCP)
1971 opcode = TX_TCPV6_PKT;
1972 else if (l4proto == IPPROTO_UDP)
1973 opcode = TX_UDPV6_PKT;
1974 }
1975 }
1976
1977 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1978 first_desc->ip_hdr_offset += skb_network_offset(skb);
1979 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1980
1981 if (!tso)
1982 return;
1983
1984 /* For LSO, we need to copy the MAC/IP/TCP headers into
1985 * the descriptor ring
1986 */
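	/*
	 * The copy goes into one or more extra command descriptors that
	 * directly follow the first one; cmd_buf_arr[].skb is left NULL for
	 * these header-template descriptors so the completion path in
	 * qlcnic_process_cmd_ring() skips them, presumably so the firmware
	 * can replicate the headers for every TSO segment it generates.
	 */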
af19b491
AKS
1987 copied = 0;
1988 offset = 2;
1989
1990 if (vlan_oob) {
1991 /* Create a TSO vlan header template for firmware */
1992
1993 hwdesc = &tx_ring->desc_head[producer];
1994 tx_ring->cmd_buf_arr[producer].skb = NULL;
1995
1996 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1997 hdr_len + VLAN_HLEN);
1998
1999 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
2000 skb_copy_from_linear_data(skb, vh, 12);
2001 vh->h_vlan_proto = htons(ETH_P_8021Q);
8cf61f89 2002 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
af19b491
AKS
2003 skb_copy_from_linear_data_offset(skb, 12,
2004 (char *)vh + 16, copy_len - 16);
2005
2006 copied = copy_len - VLAN_HLEN;
2007 offset = 0;
2008
2009 producer = get_next_index(producer, tx_ring->num_desc);
2010 }
2011
2012 while (copied < hdr_len) {
2013
2014 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
2015 (hdr_len - copied));
2016
2017 hwdesc = &tx_ring->desc_head[producer];
2018 tx_ring->cmd_buf_arr[producer].skb = NULL;
2019
2020 skb_copy_from_linear_data_offset(skb, copied,
2021 (char *)hwdesc + offset, copy_len);
2022
2023 copied += copy_len;
2024 offset = 0;
2025
2026 producer = get_next_index(producer, tx_ring->num_desc);
2027 }
2028
2029 tx_ring->producer = producer;
2030 barrier();
8bfe8b91 2031 adapter->stats.lso_frames++;
af19b491
AKS
2032}
2033
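/*
 * DMA mapping helper: the skb's linear data lands in frag_array[0] and
 * each page fragment in frag_array[i + 1]; if any mapping fails, every
 * mapping done so far is unwound before returning -ENOMEM.
 */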
2034static int
2035qlcnic_map_tx_skb(struct pci_dev *pdev,
2036 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2037{
2038 struct qlcnic_skb_frag *nf;
2039 struct skb_frag_struct *frag;
2040 int i, nr_frags;
2041 dma_addr_t map;
2042
2043 nr_frags = skb_shinfo(skb)->nr_frags;
2044 nf = &pbuf->frag_array[0];
2045
2046 map = pci_map_single(pdev, skb->data,
2047 skb_headlen(skb), PCI_DMA_TODEVICE);
2048 if (pci_dma_mapping_error(pdev, map))
2049 goto out_err;
2050
2051 nf->dma = map;
2052 nf->length = skb_headlen(skb);
2053
2054 for (i = 0; i < nr_frags; i++) {
2055 frag = &skb_shinfo(skb)->frags[i];
2056 nf = &pbuf->frag_array[i+1];
2057
2058 map = pci_map_page(pdev, frag->page, frag->page_offset,
2059 frag->size, PCI_DMA_TODEVICE);
2060 if (pci_dma_mapping_error(pdev, map))
2061 goto unwind;
2062
2063 nf->dma = map;
2064 nf->length = frag->size;
2065 }
2066
2067 return 0;
2068
2069unwind:
2070 while (--i >= 0) {
2071 nf = &pbuf->frag_array[i+1];
2072 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2073 }
2074
2075 nf = &pbuf->frag_array[0];
2076 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2077
2078out_err:
2079 return -ENOMEM;
2080}
2081
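/*
 * VLAN tagging decision for the transmit path (sketch of the helper
 * below): an 802.1Q header already present in the frame wins
 * (FLAGS_VLAN_TAGGED), otherwise an out-of-band tag from the skb is
 * used (FLAGS_VLAN_OOB). When a port VLAN (adapter->pvid) is set,
 * untagged frames get the pvid, and already-tagged frames are only
 * accepted if QLCNIC_TAGGING_ENABLED is set.
 */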
8cf61f89
AKS
2082static int
2083qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2084 struct sk_buff *skb,
2085 struct cmd_desc_type0 *first_desc)
2086{
2087 u8 opcode = 0;
2088 u16 flags = 0;
2089 __be16 protocol = skb->protocol;
2090 struct vlan_ethhdr *vh;
2091
2092 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2093 vh = (struct vlan_ethhdr *)skb->data;
2094 protocol = vh->h_vlan_encapsulated_proto;
2095 flags = FLAGS_VLAN_TAGGED;
2096 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2097 } else if (vlan_tx_tag_present(skb)) {
2098 flags = FLAGS_VLAN_OOB;
2099 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2100 }
2101 if (unlikely(adapter->pvid)) {
2102 if (first_desc->vlan_TCI &&
2103 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2104 return -EIO;
2105 if (first_desc->vlan_TCI &&
2106 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2107 goto set_flags;
2108
2109 flags = FLAGS_VLAN_OOB;
2110 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2111 }
2112set_flags:
2113 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2114 return 0;
2115}
2116
af19b491
AKS
2117static inline void
2118qlcnic_clear_cmddesc(u64 *desc)
2119{
2120 desc[0] = 0ULL;
2121 desc[2] = 0ULL;
8cf61f89 2122 desc[7] = 0ULL;
af19b491
AKS
2123}
2124
cdaff185 2125netdev_tx_t
af19b491
AKS
2126qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2127{
2128 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2129 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2130 struct qlcnic_cmd_buffer *pbuf;
2131 struct qlcnic_skb_frag *buffrag;
2132 struct cmd_desc_type0 *hwdesc, *first_desc;
2133 struct pci_dev *pdev;
dcb50aff 2134 struct ethhdr *phdr;
af19b491
AKS
2135 int i, k;
2136
2137 u32 producer;
2138 int frag_count, no_of_desc;
2139 u32 num_txd = tx_ring->num_desc;
2140
780ab790
AKS
2141 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2142 netif_stop_queue(netdev);
2143 return NETDEV_TX_BUSY;
2144 }
2145
fe4d434d 2146 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2147 phdr = (struct ethhdr *)skb->data;
2148 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2149 adapter->mac_addr))
2150 goto drop_packet;
2151 }
2152
af19b491
AKS
2153 frag_count = skb_shinfo(skb)->nr_frags + 1;
2154
 2155 /* 4 fragments per cmd desc */
2156 no_of_desc = (frag_count + 3) >> 2;
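	/*
	 * Each command descriptor carries up to 4 buffer addresses, so a
	 * frame with a linear area plus 4 page fragments (frag_count = 5)
	 * needs (5 + 3) >> 2 = 2 descriptors.
	 */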
2157
ef71ff83 2158 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2159 netif_stop_queue(netdev);
ef71ff83
RB
2160 smp_mb();
2161 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2162 netif_start_queue(netdev);
2163 else {
2164 adapter->stats.xmit_off++;
2165 return NETDEV_TX_BUSY;
2166 }
af19b491
AKS
2167 }
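	/*
	 * The re-check after netif_stop_queue(), paired with the smp_mb(),
	 * appears to close a race with qlcnic_process_cmd_ring(): the
	 * completion path may free descriptors between the availability
	 * test and the stop, and would never wake a queue it never
	 * observed as stopped.
	 */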
2168
2169 producer = tx_ring->producer;
2170 pbuf = &tx_ring->cmd_buf_arr[producer];
2171
2172 pdev = adapter->pdev;
2173
8cf61f89
AKS
2174 first_desc = hwdesc = &tx_ring->desc_head[producer];
2175 qlcnic_clear_cmddesc((u64 *)hwdesc);
2176
2177 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2178 goto drop_packet;
2179
8ae6df97
AKS
2180 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2181 adapter->stats.tx_dma_map_error++;
af19b491 2182 goto drop_packet;
8ae6df97 2183 }
af19b491
AKS
2184
2185 pbuf->skb = skb;
2186 pbuf->frag_count = frag_count;
2187
af19b491
AKS
2188 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2189 qlcnic_set_tx_port(first_desc, adapter->portnum);
2190
2191 for (i = 0; i < frag_count; i++) {
2192
2193 k = i % 4;
2194
2195 if ((k == 0) && (i > 0)) {
2196 /* move to next desc.*/
2197 producer = get_next_index(producer, num_txd);
2198 hwdesc = &tx_ring->desc_head[producer];
2199 qlcnic_clear_cmddesc((u64 *)hwdesc);
2200 tx_ring->cmd_buf_arr[producer].skb = NULL;
2201 }
2202
2203 buffrag = &pbuf->frag_array[i];
2204
2205 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2206 switch (k) {
2207 case 0:
2208 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2209 break;
2210 case 1:
2211 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2212 break;
2213 case 2:
2214 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2215 break;
2216 case 3:
2217 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2218 break;
2219 }
2220 }
2221
2222 tx_ring->producer = get_next_index(producer, num_txd);
2223
2224 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2225
b5e5492c
AKS
2226 if (qlcnic_mac_learn)
2227 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2228
af19b491
AKS
2229 qlcnic_update_cmd_producer(adapter, tx_ring);
2230
2231 adapter->stats.txbytes += skb->len;
2232 adapter->stats.xmitcalled++;
2233
2234 return NETDEV_TX_OK;
2235
2236drop_packet:
2237 adapter->stats.txdropped++;
2238 dev_kfree_skb_any(skb);
2239 return NETDEV_TX_OK;
2240}
2241
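/*
 * The temperature check below acts as a small state machine
 * (NORMAL -> WARN -> PANIC): the WARN message is printed only on the
 * NORMAL -> WARN transition, and PANIC returns 1 so that
 * qlcnic_check_health() detaches the device.
 */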
2242static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2243{
2244 struct net_device *netdev = adapter->netdev;
2245 u32 temp, temp_state, temp_val;
2246 int rv = 0;
2247
2248 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2249
2250 temp_state = qlcnic_get_temp_state(temp);
2251 temp_val = qlcnic_get_temp_val(temp);
2252
2253 if (temp_state == QLCNIC_TEMP_PANIC) {
2254 dev_err(&netdev->dev,
2255 "Device temperature %d degrees C exceeds"
2256 " maximum allowed. Hardware has been shut down.\n",
2257 temp_val);
2258 rv = 1;
2259 } else if (temp_state == QLCNIC_TEMP_WARN) {
2260 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2261 dev_err(&netdev->dev,
2262 "Device temperature %d degrees C "
2263 "exceeds operating range."
2264 " Immediate action needed.\n",
2265 temp_val);
2266 }
2267 } else {
2268 if (adapter->temp == QLCNIC_TEMP_WARN) {
2269 dev_info(&netdev->dev,
2270 "Device temperature is now %d degrees C"
2271 " in normal range.\n", temp_val);
2272 }
2273 }
2274 adapter->temp = temp_state;
2275 return rv;
2276}
2277
2278void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2279{
2280 struct net_device *netdev = adapter->netdev;
2281
2282 if (adapter->ahw.linkup && !linkup) {
69324275 2283 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2284 adapter->ahw.linkup = 0;
2285 if (netif_running(netdev)) {
2286 netif_carrier_off(netdev);
2287 netif_stop_queue(netdev);
2288 }
2289 } else if (!adapter->ahw.linkup && linkup) {
69324275 2290 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2291 adapter->ahw.linkup = 1;
2292 if (netif_running(netdev)) {
2293 netif_carrier_on(netdev);
2294 netif_wake_queue(netdev);
2295 }
2296 }
2297}
2298
2299static void qlcnic_tx_timeout(struct net_device *netdev)
2300{
2301 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2302
2303 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2304 return;
2305
2306 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2307
2308 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2309 adapter->need_fw_reset = 1;
2310 else
2311 adapter->reset_context = 1;
af19b491
AKS
2312}
2313
2314static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2315{
2316 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2317 struct net_device_stats *stats = &netdev->stats;
2318
af19b491
AKS
2319 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2320 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2321 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2322 stats->tx_bytes = adapter->stats.txbytes;
2323 stats->rx_dropped = adapter->stats.rxdropped;
2324 stats->tx_dropped = adapter->stats.txdropped;
2325
2326 return stats;
2327}
2328
7eb9855d 2329static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2330{
af19b491
AKS
2331 u32 status;
2332
2333 status = readl(adapter->isr_int_vec);
2334
2335 if (!(status & adapter->int_vec_bit))
2336 return IRQ_NONE;
2337
2338 /* check interrupt state machine, to be sure */
2339 status = readl(adapter->crb_int_state_reg);
2340 if (!ISR_LEGACY_INT_TRIGGERED(status))
2341 return IRQ_NONE;
2342
2343 writel(0xffffffff, adapter->tgt_status_reg);
2344 /* read twice to ensure write is flushed */
2345 readl(adapter->isr_int_vec);
2346 readl(adapter->isr_int_vec);
2347
7eb9855d
AKS
2348 return IRQ_HANDLED;
2349}
2350
2351static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2352{
2353 struct qlcnic_host_sds_ring *sds_ring = data;
2354 struct qlcnic_adapter *adapter = sds_ring->adapter;
2355
2356 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2357 goto done;
2358 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2359 writel(0xffffffff, adapter->tgt_status_reg);
2360 goto done;
2361 }
2362
2363 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2364 return IRQ_NONE;
2365
2366done:
2367 adapter->diag_cnt++;
2368 qlcnic_enable_int(sds_ring);
2369 return IRQ_HANDLED;
2370}
2371
2372static irqreturn_t qlcnic_intr(int irq, void *data)
2373{
2374 struct qlcnic_host_sds_ring *sds_ring = data;
2375 struct qlcnic_adapter *adapter = sds_ring->adapter;
2376
2377 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2378 return IRQ_NONE;
2379
af19b491
AKS
2380 napi_schedule(&sds_ring->napi);
2381
2382 return IRQ_HANDLED;
2383}
2384
2385static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2386{
2387 struct qlcnic_host_sds_ring *sds_ring = data;
2388 struct qlcnic_adapter *adapter = sds_ring->adapter;
2389
2390 /* clear interrupt */
2391 writel(0xffffffff, adapter->tgt_status_reg);
2392
2393 napi_schedule(&sds_ring->napi);
2394 return IRQ_HANDLED;
2395}
2396
2397static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2398{
2399 struct qlcnic_host_sds_ring *sds_ring = data;
2400
2401 napi_schedule(&sds_ring->napi);
2402 return IRQ_HANDLED;
2403}
2404
2405static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2406{
2407 u32 sw_consumer, hw_consumer;
2408 int count = 0, i;
2409 struct qlcnic_cmd_buffer *buffer;
2410 struct pci_dev *pdev = adapter->pdev;
2411 struct net_device *netdev = adapter->netdev;
2412 struct qlcnic_skb_frag *frag;
2413 int done;
2414 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2415
2416 if (!spin_trylock(&adapter->tx_clean_lock))
2417 return 1;
2418
2419 sw_consumer = tx_ring->sw_consumer;
2420 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2421
2422 while (sw_consumer != hw_consumer) {
2423 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2424 if (buffer->skb) {
2425 frag = &buffer->frag_array[0];
2426 pci_unmap_single(pdev, frag->dma, frag->length,
2427 PCI_DMA_TODEVICE);
2428 frag->dma = 0ULL;
2429 for (i = 1; i < buffer->frag_count; i++) {
2430 frag++;
2431 pci_unmap_page(pdev, frag->dma, frag->length,
2432 PCI_DMA_TODEVICE);
2433 frag->dma = 0ULL;
2434 }
2435
2436 adapter->stats.xmitfinished++;
2437 dev_kfree_skb_any(buffer->skb);
2438 buffer->skb = NULL;
2439 }
2440
2441 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2442 if (++count >= MAX_STATUS_HANDLE)
2443 break;
2444 }
2445
2446 if (count && netif_running(netdev)) {
2447 tx_ring->sw_consumer = sw_consumer;
2448
2449 smp_mb();
2450
2451 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2452 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2453 netif_wake_queue(netdev);
8bfe8b91 2454 adapter->stats.xmit_on++;
af19b491 2455 }
af19b491 2456 }
ef71ff83 2457 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2458 }
2459 /*
 2460 * If everything is freed up to the consumer, check whether the ring is
 2461 * full. If it is, check whether more needs to be freed and schedule the
 2462 * callback again.
2463 *
2464 * This happens when there are 2 CPUs. One could be freeing and the
2465 * other filling it. If the ring is full when we get out of here and
2466 * the card has already interrupted the host then the host can miss the
2467 * interrupt.
2468 *
2469 * There is still a possible race condition and the host could miss an
2470 * interrupt. The card has to take care of this.
2471 */
2472 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2473 done = (sw_consumer == hw_consumer);
2474 spin_unlock(&adapter->tx_clean_lock);
2475
2476 return done;
2477}
2478
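/*
 * Standard NAPI poll below: Tx completions are reaped first, then up to
 * 'budget' Rx packets; interrupts are re-enabled only when the Rx
 * budget was not exhausted and the Tx ring is completely cleaned.
 */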
2479static int qlcnic_poll(struct napi_struct *napi, int budget)
2480{
2481 struct qlcnic_host_sds_ring *sds_ring =
2482 container_of(napi, struct qlcnic_host_sds_ring, napi);
2483
2484 struct qlcnic_adapter *adapter = sds_ring->adapter;
2485
2486 int tx_complete;
2487 int work_done;
2488
2489 tx_complete = qlcnic_process_cmd_ring(adapter);
2490
2491 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2492
2493 if ((work_done < budget) && tx_complete) {
2494 napi_complete(&sds_ring->napi);
2495 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2496 qlcnic_enable_int(sds_ring);
2497 }
2498
2499 return work_done;
2500}
2501
8f891387 2502static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2503{
2504 struct qlcnic_host_sds_ring *sds_ring =
2505 container_of(napi, struct qlcnic_host_sds_ring, napi);
2506
2507 struct qlcnic_adapter *adapter = sds_ring->adapter;
2508 int work_done;
2509
2510 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2511
2512 if (work_done < budget) {
2513 napi_complete(&sds_ring->napi);
2514 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2515 qlcnic_enable_int(sds_ring);
2516 }
2517
2518 return work_done;
2519}
2520
af19b491
AKS
2521#ifdef CONFIG_NET_POLL_CONTROLLER
2522static void qlcnic_poll_controller(struct net_device *netdev)
2523{
bf82791e
YL
2524 int ring;
2525 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2526 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2527 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2528
af19b491 2529 disable_irq(adapter->irq);
bf82791e
YL
2530 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2531 sds_ring = &recv_ctx->sds_rings[ring];
2532 qlcnic_intr(adapter->irq, sds_ring);
2533 }
af19b491
AKS
2534 enable_irq(adapter->irq);
2535}
2536#endif
2537
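/*
 * Debug scratch encoding used below: bits 0-3 hold the port number,
 * bit 7 the 'encoding' flag, and bits 8 and up the jiffies elapsed
 * since the previous device reset.
 */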
6df900e9
SC
2538static void
2539qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2540{
2541 u32 val;
2542
2543 val = adapter->portnum & 0xf;
2544 val |= encoding << 7;
2545 val |= (jiffies - adapter->dev_rst_time) << 8;
2546
2547 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2548 adapter->dev_rst_time = jiffies;
2549}
2550
ade91f8e
AKS
2551static int
2552qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2553{
2554 u32 val;
2555
2556 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2557 state != QLCNIC_DEV_NEED_QUISCENT);
2558
2559 if (qlcnic_api_lock(adapter))
ade91f8e 2560 return -EIO;
af19b491
AKS
2561
2562 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2563
2564 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2565 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2566 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2567 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2568
2569 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2570
2571 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2572
2573 return 0;
af19b491
AKS
2574}
2575
1b95a839
AKS
2576static int
2577qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2578{
2579 u32 val;
2580
2581 if (qlcnic_api_lock(adapter))
2582 return -EBUSY;
2583
2584 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2585 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2586 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2587
2588 qlcnic_api_unlock(adapter);
2589
2590 return 0;
2591}
2592
af19b491 2593static void
21854f02 2594qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2595{
2596 u32 val;
2597
2598 if (qlcnic_api_lock(adapter))
2599 goto err;
2600
31018e06 2601 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2602 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2603 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2604
21854f02
AKS
2605 if (failed) {
2606 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2607 dev_info(&adapter->pdev->dev,
2608 "Device state set to Failed. Please Reboot\n");
2609 } else if (!(val & 0x11111111))
af19b491
AKS
2610 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2611
2612 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2613 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2614 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2615
2616 qlcnic_api_unlock(adapter);
2617err:
2618 adapter->fw_fail_cnt = 0;
2619 clear_bit(__QLCNIC_START_FW, &adapter->state);
2620 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2621}
2622
f73dfc50 2623/* Grab api lock, before checking state */
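/*
 * Each PCI function appears to own one nibble of the DRV_STATE and
 * DRV_ACTIVE registers (bit offset portnum * 4, cf. QLC_DEV_SET_RST_RDY
 * and the DRV_ACTIVE test in qlcnic_can_start_firmware()), so masking
 * with 0x11111111 compares the per-function ack bits against the set
 * of active functions.
 */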
af19b491
AKS
2624static int
2625qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2626{
2627 int act, state;
2628
2629 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2630 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2631
2632 if (((state & 0x11111111) == (act & 0x11111111)) ||
2633 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2634 return 0;
2635 else
2636 return 1;
2637}
2638
96f8118c
SC
2639static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2640{
2641 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2642
2643 if (val != QLCNIC_DRV_IDC_VER) {
2644 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2645 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2646 }
2647
2648 return 0;
2649}
2650
af19b491
AKS
2651static int
2652qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2653{
2654 u32 val, prev_state;
aa5e18c0 2655 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2656 u8 portnum = adapter->portnum;
96f8118c 2657 u8 ret;
af19b491 2658
f73dfc50
AKS
2659 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2660 return 1;
2661
af19b491
AKS
2662 if (qlcnic_api_lock(adapter))
2663 return -1;
2664
31018e06 2665 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2666 if (!(val & (1 << (portnum * 4)))) {
2667 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2668 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2669 }
2670
2671 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2672 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2673
2674 switch (prev_state) {
2675 case QLCNIC_DEV_COLD:
bbd8c6a4 2676 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2677 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2678 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2679 qlcnic_api_unlock(adapter);
2680 return 1;
2681
2682 case QLCNIC_DEV_READY:
96f8118c 2683 ret = qlcnic_check_idc_ver(adapter);
af19b491 2684 qlcnic_api_unlock(adapter);
96f8118c 2685 return ret;
af19b491
AKS
2686
2687 case QLCNIC_DEV_NEED_RESET:
2688 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2689 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2690 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2691 break;
2692
2693 case QLCNIC_DEV_NEED_QUISCENT:
2694 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2695 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2696 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2697 break;
2698
2699 case QLCNIC_DEV_FAILED:
a7fc948f 2700 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2701 qlcnic_api_unlock(adapter);
2702 return -1;
bbd8c6a4
AKS
2703
2704 case QLCNIC_DEV_INITIALIZING:
2705 case QLCNIC_DEV_QUISCENT:
2706 break;
af19b491
AKS
2707 }
2708
2709 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2710
2711 do {
af19b491 2712 msleep(1000);
a5e463d0
SC
2713 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2714
2715 if (prev_state == QLCNIC_DEV_QUISCENT)
2716 continue;
2717 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2718
65b5b420
AKS
2719 if (!dev_init_timeo) {
2720 dev_err(&adapter->pdev->dev,
2721 "Waiting for device to initialize timeout\n");
af19b491 2722 return -1;
65b5b420 2723 }
af19b491
AKS
2724
2725 if (qlcnic_api_lock(adapter))
2726 return -1;
2727
2728 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2729 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2730 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2731
96f8118c 2732 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2733 qlcnic_api_unlock(adapter);
2734
96f8118c 2735 return ret;
af19b491
AKS
2736}
2737
2738static void
2739qlcnic_fwinit_work(struct work_struct *work)
2740{
2741 struct qlcnic_adapter *adapter = container_of(work,
2742 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2743 u32 dev_state = 0xf;
af19b491 2744
f73dfc50
AKS
2745 if (qlcnic_api_lock(adapter))
2746 goto err_ret;
af19b491 2747
a5e463d0
SC
2748 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2749 if (dev_state == QLCNIC_DEV_QUISCENT) {
2750 qlcnic_api_unlock(adapter);
2751 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2752 FW_POLL_DELAY * 2);
2753 return;
2754 }
2755
9f26f547 2756 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2757 qlcnic_api_unlock(adapter);
2758 goto wait_npar;
9f26f547
AC
2759 }
2760
f73dfc50
AKS
2761 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
 2762 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2763 adapter->reset_ack_timeo);
2764 goto skip_ack_check;
2765 }
2766
2767 if (!qlcnic_check_drv_state(adapter)) {
2768skip_ack_check:
2769 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2770
2771 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2772 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2773 QLCNIC_DEV_QUISCENT);
2774 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2775 FW_POLL_DELAY * 2);
 2776 QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2777 qlcnic_idc_debug_info(adapter, 0);
2778
a5e463d0
SC
2779 qlcnic_api_unlock(adapter);
2780 return;
2781 }
2782
f73dfc50
AKS
2783 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2784 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2785 QLCNIC_DEV_INITIALIZING);
2786 set_bit(__QLCNIC_START_FW, &adapter->state);
2787 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2788 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2789 }
2790
f73dfc50
AKS
2791 qlcnic_api_unlock(adapter);
2792
9f26f547 2793 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2794 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2795 adapter->fw_wait_cnt = 0;
af19b491
AKS
2796 return;
2797 }
af19b491
AKS
2798 goto err_ret;
2799 }
2800
f73dfc50 2801 qlcnic_api_unlock(adapter);
aa5e18c0 2802
9f26f547 2803wait_npar:
af19b491 2804 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2805 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2806
af19b491 2807 switch (dev_state) {
3c4b23b1 2808 case QLCNIC_DEV_READY:
9f26f547 2809 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2810 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2811 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2812 return;
2813 }
3c4b23b1
AKS
2814 case QLCNIC_DEV_FAILED:
2815 break;
2816 default:
2817 qlcnic_schedule_work(adapter,
2818 qlcnic_fwinit_work, FW_POLL_DELAY);
2819 return;
af19b491
AKS
2820 }
2821
2822err_ret:
f73dfc50
AKS
2823 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2824 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2825 netif_device_attach(adapter->netdev);
21854f02 2826 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2827}
2828
2829static void
2830qlcnic_detach_work(struct work_struct *work)
2831{
2832 struct qlcnic_adapter *adapter = container_of(work,
2833 struct qlcnic_adapter, fw_work.work);
2834 struct net_device *netdev = adapter->netdev;
2835 u32 status;
2836
2837 netif_device_detach(netdev);
2838
2839 qlcnic_down(adapter, netdev);
2840
af19b491
AKS
2841 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2842
2843 if (status & QLCNIC_RCODE_FATAL_ERROR)
2844 goto err_ret;
2845
2846 if (adapter->temp == QLCNIC_TEMP_PANIC)
2847 goto err_ret;
2848
ade91f8e
AKS
2849 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2850 goto err_ret;
af19b491
AKS
2851
2852 adapter->fw_wait_cnt = 0;
2853
2854 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2855
2856 return;
2857
2858err_ret:
65b5b420
AKS
2859 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2860 status, adapter->temp);
34ce3626 2861 netif_device_attach(netdev);
21854f02 2862 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2863}
2864
3c4b23b1
AKS
 2865/* Transition NPAR state to NON operational */
2866static void
2867qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2868{
2869 u32 state;
2870
2871 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2872 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2873 return;
2874
2875 if (qlcnic_api_lock(adapter))
2876 return;
2877 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2878 qlcnic_api_unlock(adapter);
2879}
2880
f73dfc50 2881/* Transition to RESET state from READY state only */
af19b491
AKS
2882static void
2883qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2884{
2885 u32 state;
2886
cea8975e 2887 adapter->need_fw_reset = 1;
af19b491
AKS
2888 if (qlcnic_api_lock(adapter))
2889 return;
2890
2891 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2892
f73dfc50 2893 if (state == QLCNIC_DEV_READY) {
af19b491 2894 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2895 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2896 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2897 }
2898
3c4b23b1 2899 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2900 qlcnic_api_unlock(adapter);
2901}
2902
9f26f547
AC
 2903/* Transition NPAR state to OPERATIONAL (ready) */
2904static void
2905qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2906{
9f26f547
AC
2907 if (qlcnic_api_lock(adapter))
2908 return;
2909
3c4b23b1
AKS
2910 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2911 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2912
2913 qlcnic_api_unlock(adapter);
2914}
2915
af19b491
AKS
2916static void
2917qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2918 work_func_t func, int delay)
2919{
451724c8
SC
2920 if (test_bit(__QLCNIC_AER, &adapter->state))
2921 return;
2922
af19b491
AKS
2923 INIT_DELAYED_WORK(&adapter->fw_work, func);
2924 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2925}
2926
2927static void
2928qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2929{
2930 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2931 msleep(10);
2932
2933 cancel_delayed_work_sync(&adapter->fw_work);
2934}
2935
2936static void
2937qlcnic_attach_work(struct work_struct *work)
2938{
2939 struct qlcnic_adapter *adapter = container_of(work,
2940 struct qlcnic_adapter, fw_work.work);
2941 struct net_device *netdev = adapter->netdev;
b18971d1 2942 u32 npar_state;
af19b491 2943
b18971d1
AKS
2944 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2945 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2946 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2947 qlcnic_clr_all_drv_state(adapter, 0);
2948 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2949 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2950 FW_POLL_DELAY);
2951 else
2952 goto attach;
 2953 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2954 return;
2955 }
2956attach:
af19b491 2957 if (netif_running(netdev)) {
52486a3a 2958 if (qlcnic_up(adapter, netdev))
af19b491 2959 goto done;
af19b491 2960
aec1e845 2961 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2962 }
2963
af19b491 2964done:
34ce3626 2965 netif_device_attach(netdev);
af19b491
AKS
2966 adapter->fw_fail_cnt = 0;
2967 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2968
2969 if (!qlcnic_clr_drv_state(adapter))
2970 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2971 FW_POLL_DELAY);
af19b491
AKS
2972}
2973
2974static int
2975qlcnic_check_health(struct qlcnic_adapter *adapter)
2976{
4e70812b 2977 u32 state = 0, heartbeat;
af19b491
AKS
2978 struct net_device *netdev = adapter->netdev;
2979
2980 if (qlcnic_check_temp(adapter))
2981 goto detach;
2982
2372a5f1 2983 if (adapter->need_fw_reset)
af19b491 2984 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2985
2986 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3c4b23b1
AKS
2987 if (state == QLCNIC_DEV_NEED_RESET ||
2988 state == QLCNIC_DEV_NEED_QUISCENT) {
2989 qlcnic_set_npar_non_operational(adapter);
af19b491 2990 adapter->need_fw_reset = 1;
3c4b23b1 2991 }
af19b491 2992
4e70812b
SC
2993 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2994 if (heartbeat != adapter->heartbeat) {
2995 adapter->heartbeat = heartbeat;
af19b491
AKS
2996 adapter->fw_fail_cnt = 0;
2997 if (adapter->need_fw_reset)
2998 goto detach;
68bf1c68 2999
0df170b6
AKS
3000 if (adapter->reset_context &&
3001 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
3002 qlcnic_reset_hw_context(adapter);
3003 adapter->netdev->trans_start = jiffies;
3004 }
3005
af19b491
AKS
3006 return 0;
3007 }
3008
3009 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3010 return 0;
3011
3012 qlcnic_dev_request_reset(adapter);
3013
0df170b6
AKS
3014 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
3015 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3016
3017 dev_info(&netdev->dev, "firmware hang detected\n");
3018
3019detach:
3020 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3021 QLCNIC_DEV_NEED_RESET;
3022
3023 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
3024 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3025
af19b491 3026 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3027 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3028 }
af19b491
AKS
3029
3030 return 1;
3031}
3032
3033static void
3034qlcnic_fw_poll_work(struct work_struct *work)
3035{
3036 struct qlcnic_adapter *adapter = container_of(work,
3037 struct qlcnic_adapter, fw_work.work);
3038
3039 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3040 goto reschedule;
3041
3042
3043 if (qlcnic_check_health(adapter))
3044 return;
3045
b5e5492c
AKS
3046 if (adapter->fhash.fnum)
3047 qlcnic_prune_lb_filters(adapter);
3048
af19b491
AKS
3049reschedule:
3050 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3051}
3052
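/*
 * The helper below appears to walk the lower-numbered PCI functions of
 * the same slot; only when all of them are in D3cold (or absent) is
 * this function treated as the first one up, and therefore responsible
 * for restarting the firmware in qlcnic_attach_func().
 */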
451724c8
SC
3053static int qlcnic_is_first_func(struct pci_dev *pdev)
3054{
3055 struct pci_dev *oth_pdev;
3056 int val = pdev->devfn;
3057
3058 while (val-- > 0) {
3059 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3060 (pdev->bus), pdev->bus->number,
3061 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3062 if (!oth_pdev)
3063 continue;
451724c8 3064
bfc978fa
AKS
3065 if (oth_pdev->current_state != PCI_D3cold) {
3066 pci_dev_put(oth_pdev);
451724c8 3067 return 0;
bfc978fa
AKS
3068 }
3069 pci_dev_put(oth_pdev);
451724c8
SC
3070 }
3071 return 1;
3072}
3073
3074static int qlcnic_attach_func(struct pci_dev *pdev)
3075{
3076 int err, first_func;
3077 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3078 struct net_device *netdev = adapter->netdev;
3079
3080 pdev->error_state = pci_channel_io_normal;
3081
3082 err = pci_enable_device(pdev);
3083 if (err)
3084 return err;
3085
3086 pci_set_power_state(pdev, PCI_D0);
3087 pci_set_master(pdev);
3088 pci_restore_state(pdev);
3089
3090 first_func = qlcnic_is_first_func(pdev);
3091
3092 if (qlcnic_api_lock(adapter))
3093 return -EINVAL;
3094
933fce12 3095 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3096 adapter->need_fw_reset = 1;
3097 set_bit(__QLCNIC_START_FW, &adapter->state);
3098 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3099 QLCDB(adapter, DRV, "Restarting fw\n");
3100 }
3101 qlcnic_api_unlock(adapter);
3102
3103 err = adapter->nic_ops->start_firmware(adapter);
3104 if (err)
3105 return err;
3106
3107 qlcnic_clr_drv_state(adapter);
3108 qlcnic_setup_intr(adapter);
3109
3110 if (netif_running(netdev)) {
3111 err = qlcnic_attach(adapter);
3112 if (err) {
21854f02 3113 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3114 clear_bit(__QLCNIC_AER, &adapter->state);
3115 netif_device_attach(netdev);
3116 return err;
3117 }
3118
3119 err = qlcnic_up(adapter, netdev);
3120 if (err)
3121 goto done;
3122
aec1e845 3123 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3124 }
3125 done:
3126 netif_device_attach(netdev);
3127 return err;
3128}
3129
3130static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3131 pci_channel_state_t state)
3132{
3133 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3134 struct net_device *netdev = adapter->netdev;
3135
3136 if (state == pci_channel_io_perm_failure)
3137 return PCI_ERS_RESULT_DISCONNECT;
3138
3139 if (state == pci_channel_io_normal)
3140 return PCI_ERS_RESULT_RECOVERED;
3141
3142 set_bit(__QLCNIC_AER, &adapter->state);
3143 netif_device_detach(netdev);
3144
3145 cancel_delayed_work_sync(&adapter->fw_work);
3146
3147 if (netif_running(netdev))
3148 qlcnic_down(adapter, netdev);
3149
3150 qlcnic_detach(adapter);
3151 qlcnic_teardown_intr(adapter);
3152
3153 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3154
3155 pci_save_state(pdev);
3156 pci_disable_device(pdev);
3157
3158 return PCI_ERS_RESULT_NEED_RESET;
3159}
3160
3161static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3162{
3163 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3164 PCI_ERS_RESULT_RECOVERED;
3165}
3166
3167static void qlcnic_io_resume(struct pci_dev *pdev)
3168{
3169 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3170
3171 pci_cleanup_aer_uncorrect_error_status(pdev);
3172
3173 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3174 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3175 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3176 FW_POLL_DELAY);
3177}
3178
87eb743b
AC
3179static int
3180qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3181{
3182 int err;
3183
3184 err = qlcnic_can_start_firmware(adapter);
3185 if (err)
3186 return err;
3187
78f84e1a
AKS
3188 err = qlcnic_check_npar_opertional(adapter);
3189 if (err)
3190 return err;
3c4b23b1 3191
174240a8
RB
3192 err = qlcnic_initialize_nic(adapter);
3193 if (err)
3194 return err;
3195
87eb743b
AC
3196 qlcnic_check_options(adapter);
3197
7373373d
RB
3198 err = qlcnic_set_eswitch_port_config(adapter);
3199 if (err)
3200 return err;
3201
87eb743b
AC
3202 adapter->need_fw_reset = 0;
3203
3204 return err;
3205}
3206
3207static int
3208qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3209{
3210 return -EOPNOTSUPP;
3211}
3212
3213static int
3214qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3215{
3216 return -EOPNOTSUPP;
3217}
3218
af19b491
AKS
3219static ssize_t
3220qlcnic_store_bridged_mode(struct device *dev,
3221 struct device_attribute *attr, const char *buf, size_t len)
3222{
3223 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3224 unsigned long new;
3225 int ret = -EINVAL;
3226
3227 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3228 goto err_out;
3229
8a15ad1f 3230 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3231 goto err_out;
3232
3233 if (strict_strtoul(buf, 2, &new))
3234 goto err_out;
3235
2e9d722d 3236 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3237 ret = len;
3238
3239err_out:
3240 return ret;
3241}
3242
3243static ssize_t
3244qlcnic_show_bridged_mode(struct device *dev,
3245 struct device_attribute *attr, char *buf)
3246{
3247 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3248 int bridged_mode = 0;
3249
3250 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3251 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3252
3253 return sprintf(buf, "%d\n", bridged_mode);
3254}
3255
3256static struct device_attribute dev_attr_bridged_mode = {
3257 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3258 .show = qlcnic_show_bridged_mode,
3259 .store = qlcnic_store_bridged_mode,
3260};
3261
3262static ssize_t
3263qlcnic_store_diag_mode(struct device *dev,
3264 struct device_attribute *attr, const char *buf, size_t len)
3265{
3266 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3267 unsigned long new;
3268
3269 if (strict_strtoul(buf, 2, &new))
3270 return -EINVAL;
3271
3272 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3273 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3274
3275 return len;
3276}
3277
3278static ssize_t
3279qlcnic_show_diag_mode(struct device *dev,
3280 struct device_attribute *attr, char *buf)
3281{
3282 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3283
3284 return sprintf(buf, "%d\n",
3285 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3286}
3287
3288static struct device_attribute dev_attr_diag_mode = {
3289 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3290 .show = qlcnic_show_diag_mode,
3291 .store = qlcnic_store_diag_mode,
3292};
3293
3294static int
3295qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3296 loff_t offset, size_t size)
3297{
897e8c7c
DP
3298 size_t crb_size = 4;
3299
af19b491
AKS
3300 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3301 return -EIO;
3302
897e8c7c
DP
3303 if (offset < QLCNIC_PCI_CRBSPACE) {
3304 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3305 QLCNIC_PCI_CAMQM_END))
3306 crb_size = 8;
3307 else
3308 return -EINVAL;
3309 }
af19b491 3310
897e8c7c
DP
3311 if ((size != crb_size) || (offset & (crb_size-1)))
3312 return -EINVAL;
af19b491
AKS
3313
3314 return 0;
3315}
3316
3317static ssize_t
2c3c8bea
CW
3318qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3319 struct bin_attribute *attr,
af19b491
AKS
3320 char *buf, loff_t offset, size_t size)
3321{
3322 struct device *dev = container_of(kobj, struct device, kobj);
3323 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3324 u32 data;
897e8c7c 3325 u64 qmdata;
af19b491
AKS
3326 int ret;
3327
3328 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3329 if (ret != 0)
3330 return ret;
3331
897e8c7c
DP
3332 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3333 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3334 memcpy(buf, &qmdata, size);
3335 } else {
3336 data = QLCRD32(adapter, offset);
3337 memcpy(buf, &data, size);
3338 }
af19b491
AKS
3339 return size;
3340}
3341
3342static ssize_t
2c3c8bea
CW
3343qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3344 struct bin_attribute *attr,
af19b491
AKS
3345 char *buf, loff_t offset, size_t size)
3346{
3347 struct device *dev = container_of(kobj, struct device, kobj);
3348 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3349 u32 data;
897e8c7c 3350 u64 qmdata;
af19b491
AKS
3351 int ret;
3352
3353 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3354 if (ret != 0)
3355 return ret;
3356
897e8c7c
DP
3357 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3358 memcpy(&qmdata, buf, size);
3359 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3360 } else {
3361 memcpy(&data, buf, size);
3362 QLCWR32(adapter, offset, data);
3363 }
af19b491
AKS
3364 return size;
3365}
3366
3367static int
3368qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3369 loff_t offset, size_t size)
3370{
3371 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3372 return -EIO;
3373
3374 if ((size != 8) || (offset & 0x7))
3375 return -EIO;
3376
3377 return 0;
3378}
3379
3380static ssize_t
2c3c8bea
CW
3381qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3382 struct bin_attribute *attr,
af19b491
AKS
3383 char *buf, loff_t offset, size_t size)
3384{
3385 struct device *dev = container_of(kobj, struct device, kobj);
3386 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3387 u64 data;
3388 int ret;
3389
3390 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3391 if (ret != 0)
3392 return ret;
3393
3394 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3395 return -EIO;
3396
3397 memcpy(buf, &data, size);
3398
3399 return size;
3400}
3401
3402static ssize_t
2c3c8bea
CW
3403qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3404 struct bin_attribute *attr,
af19b491
AKS
3405 char *buf, loff_t offset, size_t size)
3406{
3407 struct device *dev = container_of(kobj, struct device, kobj);
3408 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3409 u64 data;
3410 int ret;
3411
3412 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3413 if (ret != 0)
3414 return ret;
3415
3416 memcpy(&data, buf, size);
3417
3418 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3419 return -EIO;
3420
3421 return size;
3422}
3423
3424
3425static struct bin_attribute bin_attr_crb = {
3426 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3427 .size = 0,
3428 .read = qlcnic_sysfs_read_crb,
3429 .write = qlcnic_sysfs_write_crb,
3430};
3431
3432static struct bin_attribute bin_attr_mem = {
3433 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3434 .size = 0,
3435 .read = qlcnic_sysfs_read_mem,
3436 .write = qlcnic_sysfs_write_mem,
3437};
3438
cea8975e 3439static int
346fe763
RB
3440validate_pm_config(struct qlcnic_adapter *adapter,
3441 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3442{
3443
3444 u8 src_pci_func, s_esw_id, d_esw_id;
3445 u8 dest_pci_func;
3446 int i;
3447
3448 for (i = 0; i < count; i++) {
3449 src_pci_func = pm_cfg[i].pci_func;
3450 dest_pci_func = pm_cfg[i].dest_npar;
3451 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3452 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3453 return QL_STATUS_INVALID_PARAM;
3454
3455 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3456 return QL_STATUS_INVALID_PARAM;
3457
3458 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3459 return QL_STATUS_INVALID_PARAM;
3460
346fe763
RB
3461 s_esw_id = adapter->npars[src_pci_func].phy_port;
3462 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3463
3464 if (s_esw_id != d_esw_id)
3465 return QL_STATUS_INVALID_PARAM;
3466
3467 }
3468 return 0;
3469
3470}
3471
3472static ssize_t
3473qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3474 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3475{
3476 struct device *dev = container_of(kobj, struct device, kobj);
3477 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3478 struct qlcnic_pm_func_cfg *pm_cfg;
3479 u32 id, action, pci_func;
3480 int count, rem, i, ret;
3481
3482 count = size / sizeof(struct qlcnic_pm_func_cfg);
3483 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3484 if (rem)
3485 return QL_STATUS_INVALID_PARAM;
3486
3487 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3488
3489 ret = validate_pm_config(adapter, pm_cfg, count);
3490 if (ret)
3491 return ret;
3492 for (i = 0; i < count; i++) {
3493 pci_func = pm_cfg[i].pci_func;
4e8acb01 3494 action = !!pm_cfg[i].action;
346fe763
RB
3495 id = adapter->npars[pci_func].phy_port;
3496 ret = qlcnic_config_port_mirroring(adapter, id,
3497 action, pci_func);
3498 if (ret)
3499 return ret;
3500 }
3501
3502 for (i = 0; i < count; i++) {
3503 pci_func = pm_cfg[i].pci_func;
3504 id = adapter->npars[pci_func].phy_port;
4e8acb01 3505 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3506 adapter->npars[pci_func].dest_npar = id;
3507 }
3508 return size;
3509}
3510
3511static ssize_t
3512qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3513 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3514{
3515 struct device *dev = container_of(kobj, struct device, kobj);
3516 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3517 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3518 int i;
3519
3520 if (size != sizeof(pm_cfg))
3521 return QL_STATUS_INVALID_PARAM;
3522
3523 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3524 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3525 continue;
3526 pm_cfg[i].action = adapter->npars[i].enable_pm;
3527 pm_cfg[i].dest_npar = 0;
3528 pm_cfg[i].pci_func = i;
3529 }
3530 memcpy(buf, &pm_cfg, size);
3531
3532 return size;
3533}
3534
cea8975e 3535static int
346fe763 3536validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3537 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3538{
7613c87b 3539 u32 op_mode;
346fe763
RB
3540 u8 pci_func;
3541 int i;
7613c87b
RB
3542
3543 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3544
346fe763
RB
3545 for (i = 0; i < count; i++) {
3546 pci_func = esw_cfg[i].pci_func;
3547 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3548 return QL_STATUS_INVALID_PARAM;
3549
4e8acb01
RB
3550 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3551 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3552 return QL_STATUS_INVALID_PARAM;
346fe763 3553
4e8acb01
RB
3554 switch (esw_cfg[i].op_mode) {
3555 case QLCNIC_PORT_DEFAULTS:
7613c87b 3556 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3557 QLCNIC_NON_PRIV_FUNC) {
7613c87b 3558 esw_cfg[i].mac_anti_spoof = 0;
7373373d
RB
3559 esw_cfg[i].mac_override = 1;
3560 }
4e8acb01
RB
3561 break;
3562 case QLCNIC_ADD_VLAN:
346fe763
RB
3563 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3564 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3565 if (!esw_cfg[i].op_type)
3566 return QL_STATUS_INVALID_PARAM;
3567 break;
3568 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3569 if (!esw_cfg[i].op_type)
3570 return QL_STATUS_INVALID_PARAM;
3571 break;
3572 default:
346fe763 3573 return QL_STATUS_INVALID_PARAM;
4e8acb01 3574 }
346fe763 3575 }
346fe763
RB
3576 return 0;
3577}
3578
3579static ssize_t
3580qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3581 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3582{
3583 struct device *dev = container_of(kobj, struct device, kobj);
3584 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3585 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3586 struct qlcnic_npar_info *npar;
346fe763 3587 int count, rem, i, ret;
0325d69b 3588 u8 pci_func, op_mode = 0;
346fe763
RB
3589
3590 count = size / sizeof(struct qlcnic_esw_func_cfg);
3591 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3592 if (rem)
3593 return QL_STATUS_INVALID_PARAM;
3594
3595 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3596 ret = validate_esw_config(adapter, esw_cfg, count);
3597 if (ret)
3598 return ret;
3599
3600 for (i = 0; i < count; i++) {
0325d69b
RB
3601 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3602 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3603 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3604
3605 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3606 continue;
3607
3608 op_mode = esw_cfg[i].op_mode;
3609 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3610 esw_cfg[i].op_mode = op_mode;
3611 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3612
3613 switch (esw_cfg[i].op_mode) {
3614 case QLCNIC_PORT_DEFAULTS:
3615 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3616 break;
8cf61f89
AKS
3617 case QLCNIC_ADD_VLAN:
3618 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3619 break;
3620 case QLCNIC_DEL_VLAN:
3621 esw_cfg[i].vlan_id = 0;
3622 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3623 break;
0325d69b 3624 }
346fe763
RB
3625 }
3626
0325d69b
RB
3627 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3628 goto out;
e9a47700 3629
346fe763
RB
3630 for (i = 0; i < count; i++) {
3631 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3632 npar = &adapter->npars[pci_func];
3633 switch (esw_cfg[i].op_mode) {
3634 case QLCNIC_PORT_DEFAULTS:
3635 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3636 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3637 npar->offload_flags = esw_cfg[i].offload_flags;
3638 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3639 npar->discard_tagged = esw_cfg[i].discard_tagged;
3640 break;
3641 case QLCNIC_ADD_VLAN:
3642 npar->pvid = esw_cfg[i].vlan_id;
3643 break;
3644 case QLCNIC_DEL_VLAN:
3645 npar->pvid = 0;
3646 break;
3647 }
346fe763 3648 }
0325d69b 3649out:
346fe763
RB
3650 return size;
3651}
3652
3653static ssize_t
3654qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3655 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3656{
3657 struct device *dev = container_of(kobj, struct device, kobj);
3658 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3659 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3660 u8 i;
346fe763
RB
3661
3662 if (size != sizeof(esw_cfg))
3663 return QL_STATUS_INVALID_PARAM;
3664
3665 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3666 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3667 continue;
4e8acb01
RB
3668 esw_cfg[i].pci_func = i;
3669 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3670 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3671 }
3672 memcpy(buf, &esw_cfg, size);
3673
3674 return size;
3675}
3676
cea8975e 3677static int
346fe763
RB
3678validate_npar_config(struct qlcnic_adapter *adapter,
3679 struct qlcnic_npar_func_cfg *np_cfg, int count)
3680{
3681 u8 pci_func, i;
3682
3683 for (i = 0; i < count; i++) {
3684 pci_func = np_cfg[i].pci_func;
3685 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3686 return QL_STATUS_INVALID_PARAM;
3687
3688 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3689 return QL_STATUS_INVALID_PARAM;
3690
3691 if (!IS_VALID_BW(np_cfg[i].min_bw)
3692 || !IS_VALID_BW(np_cfg[i].max_bw)
3693 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3694 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3695 return QL_STATUS_INVALID_PARAM;
3696 }
3697 return 0;
3698}
3699
3700static ssize_t
3701qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3702 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3703{
3704 struct device *dev = container_of(kobj, struct device, kobj);
3705 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3706 struct qlcnic_info nic_info;
3707 struct qlcnic_npar_func_cfg *np_cfg;
3708 int i, count, rem, ret;
3709 u8 pci_func;
3710
3711 count = size / sizeof(struct qlcnic_npar_func_cfg);
3712 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3713 if (rem)
3714 return QL_STATUS_INVALID_PARAM;
3715
3716 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3717 ret = validate_npar_config(adapter, np_cfg, count);
3718 if (ret)
3719 return ret;
3720
3721 for (i = 0; i < count ; i++) {
3722 pci_func = np_cfg[i].pci_func;
3723 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3724 if (ret)
3725 return ret;
3726 nic_info.pci_func = pci_func;
3727 nic_info.min_tx_bw = np_cfg[i].min_bw;
3728 nic_info.max_tx_bw = np_cfg[i].max_bw;
3729 ret = qlcnic_set_nic_info(adapter, &nic_info);
3730 if (ret)
3731 return ret;
cea8975e
AC
3732 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3733 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3734 }
3735
3736 return size;
3737
3738}
3739static ssize_t
3740qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3741 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3742{
3743 struct device *dev = container_of(kobj, struct device, kobj);
3744 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3745 struct qlcnic_info nic_info;
3746 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3747 int i, ret;
3748
3749 if (size != sizeof(np_cfg))
3750 return QL_STATUS_INVALID_PARAM;
3751
3752 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3753 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3754 continue;
3755 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3756 if (ret)
3757 return ret;
3758
3759 np_cfg[i].pci_func = i;
3760 np_cfg[i].op_mode = nic_info.op_mode;
3761 np_cfg[i].port_num = nic_info.phys_port;
3762 np_cfg[i].fw_capab = nic_info.capabilities;
3763 np_cfg[i].min_bw = nic_info.min_tx_bw;
3764 np_cfg[i].max_bw = nic_info.max_tx_bw;
3765 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3766 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3767 }
3768 memcpy(buf, &np_cfg, size);
3769 return size;
3770}
3771
b6021212
AKS
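/*
 * sysfs read handler for "port_stats": "offset" selects the PCI function;
 * the RX and TX counters for that port are returned as a single
 * qlcnic_esw_statistics record.
 */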
3772static ssize_t
3773qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3774 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3775{
3776 struct device *dev = container_of(kobj, struct device, kobj);
3777 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3778 struct qlcnic_esw_statistics port_stats;
3779 int ret;
3780
3781 if (size != sizeof(struct qlcnic_esw_statistics))
3782 return QL_STATUS_INVALID_PARAM;
3783
3784 if (offset >= QLCNIC_MAX_PCI_FUNC)
3785 return QL_STATUS_INVALID_PARAM;
3786
3787 memset(&port_stats, 0, size);
3788 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3789 &port_stats.rx);
3790 if (ret)
3791 return ret;
3792
3793 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3794 &port_stats.tx);
3795 if (ret)
3796 return ret;
3797
3798 memcpy(buf, &port_stats, size);
3799 return size;
3800}
3801
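/*
 * sysfs read handler for "esw_stats": like the port statistics above, but
 * "offset" selects an eswitch and is bounded by QLCNIC_NIU_MAX_XG_PORTS.
 */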
3802static ssize_t
3803qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3804 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3805{
3806 struct device *dev = container_of(kobj, struct device, kobj);
3807 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3808 struct qlcnic_esw_statistics esw_stats;
3809 int ret;
3810
3811 if (size != sizeof(struct qlcnic_esw_statistics))
3812 return QL_STATUS_INVALID_PARAM;
3813
3814 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3815 return QL_STATUS_INVALID_PARAM;
3816
3817 memset(&esw_stats, 0, size);
3818 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3819 &esw_stats.rx);
3820 if (ret)
3821 return ret;
3822
3823 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3824 &esw_stats.tx);
3825 if (ret)
3826 return ret;
3827
3828 memcpy(buf, &esw_stats, size);
3829 return size;
3830}
3831
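/*
 * sysfs write handler for "esw_stats": clears the RX and TX counters of
 * the eswitch selected by "offset".
 */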
3832static ssize_t
3833qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3834 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3835{
3836 struct device *dev = container_of(kobj, struct device, kobj);
3837 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3838 int ret;
3839
3840 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3841 return QL_STATUS_INVALID_PARAM;
3842
3843 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3844 QLCNIC_QUERY_RX_COUNTER);
3845 if (ret)
3846 return ret;
3847
3848 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3849 QLCNIC_QUERY_TX_COUNTER);
3850 if (ret)
3851 return ret;
3852
3853 return size;
3854}
3855
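/*
 * sysfs write handler for "port_stats": clears the RX and TX counters of
 * the PCI function selected by "offset".
 */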
3856static ssize_t
3857qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3858 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3859{
3860
3861 struct device *dev = container_of(kobj, struct device, kobj);
3862 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3863 int ret;
3864
3865 if (offset >= QLCNIC_MAX_PCI_FUNC)
3866 return QL_STATUS_INVALID_PARAM;
3867
3868 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3869 QLCNIC_QUERY_RX_COUNTER);
3870 if (ret)
3871 return ret;
3872
3873 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3874 QLCNIC_QUERY_TX_COUNTER);
3875 if (ret)
3876 return ret;
3877
3878 return size;
3879}
3880
346fe763
RB
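/*
 * sysfs read handler for "pci_config": queries the firmware for the PCI
 * function table and reports type, port, bandwidth limits and default MAC
 * address of every function.
 */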
3881static ssize_t
3882qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3883 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3884{
3885 struct device *dev = container_of(kobj, struct device, kobj);
3886 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3887 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3888 struct qlcnic_pci_info *pci_info;
346fe763
RB
3889 int i, ret;
3890
3891 if (size != sizeof(pci_cfg))
3892 return QL_STATUS_INVALID_PARAM;
3893
e88db3bd
DC
3894 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3895 if (!pci_info)
3896 return -ENOMEM;
3897
346fe763 3898 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3899 if (ret) {
3900 kfree(pci_info);
346fe763 3901 return ret;
e88db3bd 3902 }
346fe763
RB
3903
3904 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3905 pci_cfg[i].pci_func = pci_info[i].id;
3906 pci_cfg[i].func_type = pci_info[i].type;
3907 pci_cfg[i].port_num = pci_info[i].default_port;
3908 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3909 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3910 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3911 }
3912 memcpy(buf, &pci_cfg, size);
e88db3bd 3913 kfree(pci_info);
346fe763 3914 return size;
346fe763
RB
3915}
3916static struct bin_attribute bin_attr_npar_config = {
3917 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3918 .size = 0,
3919 .read = qlcnic_sysfs_read_npar_config,
3920 .write = qlcnic_sysfs_write_npar_config,
3921};
3922
3923static struct bin_attribute bin_attr_pci_config = {
3924 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3925 .size = 0,
3926 .read = qlcnic_sysfs_read_pci_config,
3927 .write = NULL,
3928};
3929
b6021212
AKS
3930static struct bin_attribute bin_attr_port_stats = {
3931 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3932 .size = 0,
3933 .read = qlcnic_sysfs_get_port_stats,
3934 .write = qlcnic_sysfs_clear_port_stats,
3935};
3936
3937static struct bin_attribute bin_attr_esw_stats = {
3938 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3939 .size = 0,
3940 .read = qlcnic_sysfs_get_esw_stats,
3941 .write = qlcnic_sysfs_clear_esw_stats,
3942};
3943
346fe763
RB
3944static struct bin_attribute bin_attr_esw_config = {
3945 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3946 .size = 0,
3947 .read = qlcnic_sysfs_read_esw_config,
3948 .write = qlcnic_sysfs_write_esw_config,
3949};
3950
3951static struct bin_attribute bin_attr_pm_config = {
3952 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3953 .size = 0,
3954 .read = qlcnic_sysfs_read_pm_config,
3955 .write = qlcnic_sysfs_write_pm_config,
3956};
3957
af19b491
AKS
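/* Expose "bridged_mode" only when the firmware advertises bridging support. */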
3958static void
3959qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3960{
3961 struct device *dev = &adapter->pdev->dev;
3962
3963 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3964 if (device_create_file(dev, &dev_attr_bridged_mode))
3965 dev_warn(dev,
3966 "failed to create bridged_mode sysfs entry\n");
3967}
3968
3969static void
3970qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3971{
3972 struct device *dev = &adapter->pdev->dev;
3973
3974 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3975 device_remove_file(dev, &dev_attr_bridged_mode);
3976}
3977
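/*
 * Create the diagnostic sysfs attributes. The set depends on privilege:
 * port statistics for every function, CRB/memory/diag_mode for privileged
 * functions, eswitch attributes only when eswitching is enabled, and
 * NPAR/PCI/PM configuration only for the management function.
 */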
3978static void
3979qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3980{
3981 struct device *dev = &adapter->pdev->dev;
3982
b6021212
AKS
3983 if (device_create_bin_file(dev, &bin_attr_port_stats))
3984 dev_info(dev, "failed to create port stats sysfs entry\n");
3985
132ff00a
AC
3986 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3987 return;
af19b491
AKS
3988 if (device_create_file(dev, &dev_attr_diag_mode))
3989 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3990 if (device_create_bin_file(dev, &bin_attr_crb))
3991 dev_info(dev, "failed to create crb sysfs entry\n");
3992 if (device_create_bin_file(dev, &bin_attr_mem))
3993 dev_info(dev, "failed to create mem sysfs entry\n");
4e8acb01
RB
3994 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3995 return;
3996 if (device_create_bin_file(dev, &bin_attr_esw_config))
3997 dev_info(dev, "failed to create esw config sysfs entry\n");
3998 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
3999 return;
4000 if (device_create_bin_file(dev, &bin_attr_pci_config))
4001 dev_info(dev, "failed to create pci config sysfs entry\n");
4002 if (device_create_bin_file(dev, &bin_attr_npar_config))
4003 dev_info(dev, "failed to create npar config sysfs entry\n");
346fe763
RB
4004 if (device_create_bin_file(dev, &bin_attr_pm_config))
4005 dev_info(dev, "failed to create pm config sysfs entry\n");
b6021212
AKS
4006 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4007 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
af19b491
AKS
4008}
4009
af19b491
AKS
4010static void
4011qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4012{
4013 struct device *dev = &adapter->pdev->dev;
4014
b6021212
AKS
4015 device_remove_bin_file(dev, &bin_attr_port_stats);
4016
132ff00a
AC
4017 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4018 return;
af19b491
AKS
4019 device_remove_file(dev, &dev_attr_diag_mode);
4020 device_remove_bin_file(dev, &bin_attr_crb);
4021 device_remove_bin_file(dev, &bin_attr_mem);
4e8acb01
RB
4022 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4023 return;
4024 device_remove_bin_file(dev, &bin_attr_esw_config);
4025 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763
RB
4026 return;
4027 device_remove_bin_file(dev, &bin_attr_pci_config);
4028 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4029 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4030 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4031}
4032
4033#ifdef CONFIG_INET
4034
4035#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4036
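/*
 * Notify the adapter of every IPv4 address configured on "dev", marking
 * each one up or down according to "event".
 */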
af19b491 4037static void
aec1e845
AKS
4038qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4039 struct net_device *dev, unsigned long event)
af19b491
AKS
4040{
4041 struct in_device *indev;
af19b491 4042
af19b491
AKS
4043 indev = in_dev_get(dev);
4044 if (!indev)
4045 return;
4046
4047 for_ifa(indev) {
4048 switch (event) {
4049 case NETDEV_UP:
4050 qlcnic_config_ipaddr(adapter,
4051 ifa->ifa_address, QLCNIC_IP_UP);
4052 break;
4053 case NETDEV_DOWN:
4054 qlcnic_config_ipaddr(adapter,
4055 ifa->ifa_address, QLCNIC_IP_DOWN);
4056 break;
4057 default:
4058 break;
4059 }
4060 } endfor_ifa(indev);
4061
4062 in_dev_put(indev);
af19b491
AKS
4063}
4064
aec1e845
AKS
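/*
 * Re-sync IPv4 addresses for the qlcnic netdev and for any VLAN devices
 * stacked on top of it.
 */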
4065static void
4066qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4067{
4068 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4069 struct net_device *dev;
4070 u16 vid;
4071
4072 qlcnic_config_indev_addr(adapter, netdev, event);
4073
4074 if (!adapter->vlgrp)
4075 return;
4076
4077 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4078 dev = vlan_group_get_device(adapter->vlgrp, vid);
4079 if (!dev)
4080 continue;
4081
4082 qlcnic_config_indev_addr(adapter, dev, event);
4083 }
4084}
4085
af19b491
AKS
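/*
 * netdevice notifier: resolve VLAN devices to the underlying qlcnic
 * interface and, if the device is up, replay the event for all of its
 * IPv4 addresses.
 */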
4086static int qlcnic_netdev_event(struct notifier_block *this,
4087 unsigned long event, void *ptr)
4088{
4089 struct qlcnic_adapter *adapter;
4090 struct net_device *dev = (struct net_device *)ptr;
4091
4092recheck:
4093 if (dev == NULL)
4094 goto done;
4095
4096 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4097 dev = vlan_dev_real_dev(dev);
4098 goto recheck;
4099 }
4100
4101 if (!is_qlcnic_netdev(dev))
4102 goto done;
4103
4104 adapter = netdev_priv(dev);
4105
4106 if (!adapter)
4107 goto done;
4108
8a15ad1f 4109 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4110 goto done;
4111
aec1e845 4112 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4113done:
4114 return NOTIFY_DONE;
4115}
4116
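/*
 * inetaddr notifier: propagate a single IPv4 address add/remove on a
 * qlcnic (or stacked VLAN) interface to the adapter.
 */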
4117static int
4118qlcnic_inetaddr_event(struct notifier_block *this,
4119 unsigned long event, void *ptr)
4120{
4121 struct qlcnic_adapter *adapter;
4122 struct net_device *dev;
4123
4124 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4125
4126 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4127
4128recheck:
aec1e845 4129 if (dev == NULL)
af19b491
AKS
4130 goto done;
4131
4132 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4133 dev = vlan_dev_real_dev(dev);
4134 goto recheck;
4135 }
4136
4137 if (!is_qlcnic_netdev(dev))
4138 goto done;
4139
4140 adapter = netdev_priv(dev);
4141
251a84c9 4142 if (!adapter)
af19b491
AKS
4143 goto done;
4144
8a15ad1f 4145 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4146 goto done;
4147
4148 switch (event) {
4149 case NETDEV_UP:
4150 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4151 break;
4152 case NETDEV_DOWN:
4153 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4154 break;
4155 default:
4156 break;
4157 }
4158
4159done:
4160 return NOTIFY_DONE;
4161}
4162
4163static struct notifier_block qlcnic_netdev_cb = {
4164 .notifier_call = qlcnic_netdev_event,
4165};
4166
4167static struct notifier_block qlcnic_inetaddr_cb = {
4168 .notifier_call = qlcnic_inetaddr_event,
4169};
4170#else
4171static void
aec1e845 4172qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4173{ }
4174#endif
451724c8
SC
4175static struct pci_error_handlers qlcnic_err_handler = {
4176 .error_detected = qlcnic_io_error_detected,
4177 .slot_reset = qlcnic_io_slot_reset,
4178 .resume = qlcnic_io_resume,
4179};
af19b491
AKS
4180
4181static struct pci_driver qlcnic_driver = {
4182 .name = qlcnic_driver_name,
4183 .id_table = qlcnic_pci_tbl,
4184 .probe = qlcnic_probe,
4185 .remove = __devexit_p(qlcnic_remove),
4186#ifdef CONFIG_PM
4187 .suspend = qlcnic_suspend,
4188 .resume = qlcnic_resume,
4189#endif
451724c8
SC
4190 .shutdown = qlcnic_shutdown,
4191 .err_handler = &qlcnic_err_handler
4192
af19b491
AKS
4193};
4194
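/*
 * Module init: register the netdev and inetaddr notifiers (when
 * CONFIG_INET is set) before the PCI driver, and unregister them again
 * if pci_register_driver() fails.
 */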
4195static int __init qlcnic_init_module(void)
4196{
0cf3a14c 4197 int ret;
af19b491
AKS
4198
4199 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4200
4201#ifdef CONFIG_INET
4202 register_netdevice_notifier(&qlcnic_netdev_cb);
4203 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4204#endif
4205
0cf3a14c
AKS
4206 ret = pci_register_driver(&qlcnic_driver);
4207 if (ret) {
4208#ifdef CONFIG_INET
4209 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4210 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4211#endif
4212 }
af19b491 4213
0cf3a14c 4214 return ret;
af19b491
AKS
4215}
4216
4217module_init(qlcnic_init_module);
4218
4219static void __exit qlcnic_exit_module(void)
4220{
4221
4222 pci_unregister_driver(&qlcnic_driver);
4223
4224#ifdef CONFIG_INET
4225 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4226 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4227#endif
4228}
4229
4230module_exit(qlcnic_exit_module);