drivers/net/netxen/netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * Copyright (C) 2009 - QLogic Corporation.
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
19 * MA 02111-1307, USA.
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called "COPYING".
23 *
24 */
25
26 #include <linux/slab.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include "netxen_nic_hw.h"
30
31 #include "netxen_nic.h"
32
33 #include <linux/dma-mapping.h>
34 #include <linux/if_vlan.h>
35 #include <net/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/inetdevice.h>
38 #include <linux/sysfs.h>
39 #include <linux/aer.h>
40
41 MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
44 MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
45
46 char netxen_nic_driver_name[] = "netxen_nic";
47 static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
48 NETXEN_NIC_LINUX_VERSIONID;
49
50 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
51
52 /* Default to restricted 1G auto-neg mode */
53 static int wol_port_mode = 5;
54
55 static int use_msi = 1;
56
57 static int use_msi_x = 1;
58
59 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60 module_param(auto_fw_reset, int, 0644);
61 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
62
63 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
66 static int netxen_nic_open(struct net_device *netdev);
67 static int netxen_nic_close(struct net_device *netdev);
68 static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
69 struct net_device *);
70 static void netxen_tx_timeout(struct net_device *netdev);
71 static void netxen_tx_timeout_task(struct work_struct *work);
72 static void netxen_fw_poll_work(struct work_struct *work);
73 static void netxen_schedule_work(struct netxen_adapter *adapter,
74 work_func_t func, int delay);
75 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
76 static int netxen_nic_poll(struct napi_struct *napi, int budget);
77 #ifdef CONFIG_NET_POLL_CONTROLLER
78 static void netxen_nic_poll_controller(struct net_device *netdev);
79 #endif
80
81 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
82 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
83 static void netxen_create_diag_entries(struct netxen_adapter *adapter);
84 static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
85
86 static int nx_dev_request_aer(struct netxen_adapter *adapter);
87 static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
88 static int netxen_can_start_firmware(struct netxen_adapter *adapter);
89
90 static irqreturn_t netxen_intr(int irq, void *data);
91 static irqreturn_t netxen_msi_intr(int irq, void *data);
92 static irqreturn_t netxen_msix_intr(int irq, void *data);
93
94 static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
95 static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
96 static int netxen_nic_set_mac(struct net_device *netdev, void *p);
97
98 /* PCI Device ID Table */
99 #define ENTRY(device) \
100 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
102
103 static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
104 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
105 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
106 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
107 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
108 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
109 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
110 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
111 ENTRY(PCI_DEVICE_ID_NX3031),
112 {0,}
113 };
114
115 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
116
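/*
 * Per-port CRB register offsets through which the host publishes the TX
 * ring producer index to firmware (crb_cmd_producer) and mirrors the
 * software consumer index (crb_cmd_consumer below); indexed by
 * adapter->portnum on older P2/NX2031 parts, see netxen_nic_attach().
 */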
117 static uint32_t crb_cmd_producer[4] = {
118 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
119 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
120 };
121
122 void
123 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
124 struct nx_host_tx_ring *tx_ring)
125 {
126 NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
127 }
128
129 static uint32_t crb_cmd_consumer[4] = {
130 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
131 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
132 };
133
134 static inline void
135 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
136 struct nx_host_tx_ring *tx_ring)
137 {
138 NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
139 }
140
141 static uint32_t msi_tgt_status[8] = {
142 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
143 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
144 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
145 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
146 };
147
148 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
149
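/*
 * Mask/unmask the interrupt for one status (SDS) ring by writing its CRB
 * interrupt-mask register; when running with legacy INTx the enable path
 * also rewrites the global target mask register.
 */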
150 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
151 {
152 struct netxen_adapter *adapter = sds_ring->adapter;
153
154 NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
155 }
156
157 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
158 {
159 struct netxen_adapter *adapter = sds_ring->adapter;
160
161 NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
162
163 if (!NETXEN_IS_MSI_FAMILY(adapter))
164 NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
165 }
166
167 static int
168 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
169 {
170 int size = sizeof(struct nx_host_sds_ring) * count;
171
172 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
173
174 return recv_ctx->sds_rings == NULL;
175 }
176
177 static void
178 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
179 {
180 if (recv_ctx->sds_rings != NULL)
181 kfree(recv_ctx->sds_rings);
182
183 recv_ctx->sds_rings = NULL;
184 }
185
186 static int
187 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
188 {
189 int ring;
190 struct nx_host_sds_ring *sds_ring;
191 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
192
193 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
194 return -ENOMEM;
195
196 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
197 sds_ring = &recv_ctx->sds_rings[ring];
198 netif_napi_add(netdev, &sds_ring->napi,
199 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
200 }
201
202 return 0;
203 }
204
205 static void
206 netxen_napi_del(struct netxen_adapter *adapter)
207 {
208 int ring;
209 struct nx_host_sds_ring *sds_ring;
210 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 netxen_free_sds_rings(&adapter->recv_ctx);
218 }
219
220 static void
221 netxen_napi_enable(struct netxen_adapter *adapter)
222 {
223 int ring;
224 struct nx_host_sds_ring *sds_ring;
225 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
228 sds_ring = &recv_ctx->sds_rings[ring];
229 napi_enable(&sds_ring->napi);
230 netxen_nic_enable_int(sds_ring);
231 }
232 }
233
234 static void
235 netxen_napi_disable(struct netxen_adapter *adapter)
236 {
237 int ring;
238 struct nx_host_sds_ring *sds_ring;
239 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
240
241 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
242 sds_ring = &recv_ctx->sds_rings[ring];
243 netxen_nic_disable_int(sds_ring);
244 napi_synchronize(&sds_ring->napi);
245 napi_disable(&sds_ring->napi);
246 }
247 }
248
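/*
 * Initial DMA mask selection: 35-bit for P2 parts (except on IA64) with a
 * 32-bit consistent mask, 39-bit for both masks on P3. The mask may be
 * widened later by nx_update_dma_mask() once firmware reports
 * CRB_DMA_SHIFT.
 */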
249 static int nx_set_dma_mask(struct netxen_adapter *adapter)
250 {
251 struct pci_dev *pdev = adapter->pdev;
252 uint64_t mask, cmask;
253
254 adapter->pci_using_dac = 0;
255
256 mask = DMA_BIT_MASK(32);
257 cmask = DMA_BIT_MASK(32);
258
259 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
260 #ifndef CONFIG_IA64
261 mask = DMA_BIT_MASK(35);
262 #endif
263 } else {
264 mask = DMA_BIT_MASK(39);
265 cmask = mask;
266 }
267
268 if (pci_set_dma_mask(pdev, mask) == 0 &&
269 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
270 adapter->pci_using_dac = 1;
271 return 0;
272 }
273
274 return -EIO;
275 }
276
277 /* Update addressable range if firmware supports it */
278 static int
279 nx_update_dma_mask(struct netxen_adapter *adapter)
280 {
281 int change, shift, err;
282 uint64_t mask, old_mask, old_cmask;
283 struct pci_dev *pdev = adapter->pdev;
284
285 change = 0;
286
287 shift = NXRD32(adapter, CRB_DMA_SHIFT);
288 if (shift > 32)
289 return 0;
290
291 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
292 change = 1;
293 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
294 change = 1;
295
296 if (change) {
297 old_mask = pdev->dma_mask;
298 old_cmask = pdev->dev.coherent_dma_mask;
299
300 mask = DMA_BIT_MASK(32+shift);
301
302 err = pci_set_dma_mask(pdev, mask);
303 if (err)
304 goto err_out;
305
306 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
307
308 err = pci_set_consistent_dma_mask(pdev, mask);
309 if (err)
310 goto err_out;
311 }
312 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
313 }
314
315 return 0;
316
317 err_out:
318 pci_set_dma_mask(pdev, old_mask);
319 pci_set_consistent_dma_mask(pdev, old_cmask);
320 return err;
321 }
322
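/*
 * First-boot hardware checks: stamp the board-info magic, apply the P2
 * bus-master workaround, verify the global SW reset value and kick the
 * P2 boot loader, polling until it clears the magic again.
 */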
323 static int
324 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
325 {
326 u32 val, timeout;
327
328 if (first_boot == 0x55555555) {
329 /* This is the first boot after power up */
330 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
331
332 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
333 return 0;
334
335 /* PCI bus master workaround */
336 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
337 if (!(first_boot & 0x4)) {
338 first_boot |= 0x4;
339 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
340 NXRD32(adapter, NETXEN_PCIE_REG(0x4));
341 }
342
343 /* Verify the global SW reset register has its expected power-on value */
344 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
345 if (first_boot != 0x80000f) {
346 /* clear the register for future unloads/loads */
347 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
348 return -EIO;
349 }
350
351 /* Start P2 boot loader */
352 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
353 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
354 timeout = 0;
355 do {
356 msleep(1);
357 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
358
359 if (++timeout > 5000)
360 return -EIO;
361
362 } while (val == NETXEN_BDINFO_MAGIC);
363 }
364 return 0;
365 }
366
367 static void netxen_set_port_mode(struct netxen_adapter *adapter)
368 {
369 u32 val, data;
370
371 val = adapter->ahw.board_type;
372 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
373 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
374 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
375 data = NETXEN_PORT_MODE_802_3_AP;
376 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
377 } else if (port_mode == NETXEN_PORT_MODE_XG) {
378 data = NETXEN_PORT_MODE_XG;
379 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
380 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
381 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
382 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
383 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
384 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
385 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
386 } else {
387 data = NETXEN_PORT_MODE_AUTO_NEG;
388 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
389 }
390
391 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
392 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
393 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
394 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
395 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
396 }
397 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
398 }
399 }
400
401 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
402 {
403 u32 control;
404 int pos;
405
406 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
407 if (pos) {
408 pci_read_config_dword(pdev, pos, &control);
409 if (enable)
410 control |= PCI_MSIX_FLAGS_ENABLE;
411 else
412 control = 0;
413 pci_write_config_dword(pdev, pos, control);
414 }
415 }
416
417 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
418 {
419 int i;
420
421 for (i = 0; i < count; i++)
422 adapter->msix_entries[i].entry = i;
423 }
424
425 static int
426 netxen_read_mac_addr(struct netxen_adapter *adapter)
427 {
428 int i;
429 unsigned char *p;
430 u64 mac_addr;
431 struct net_device *netdev = adapter->netdev;
432 struct pci_dev *pdev = adapter->pdev;
433
434 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
435 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
436 return -EIO;
437 } else {
438 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
439 return -EIO;
440 }
441
442 p = (unsigned char *)&mac_addr;
443 for (i = 0; i < 6; i++)
444 netdev->dev_addr[i] = *(p + 5 - i);
445
446 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
447 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
448
449 /* warn if the address read from hardware is not a valid MAC */
450
451 if (!is_valid_ether_addr(netdev->perm_addr))
452 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
453
454 return 0;
455 }
456
457 static int netxen_nic_set_mac(struct net_device *netdev, void *p)
458 {
459 struct netxen_adapter *adapter = netdev_priv(netdev);
460 struct sockaddr *addr = p;
461
462 if (!is_valid_ether_addr(addr->sa_data))
463 return -EINVAL;
464
465 if (netif_running(netdev)) {
466 netif_device_detach(netdev);
467 netxen_napi_disable(adapter);
468 }
469
470 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
471 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
472 adapter->macaddr_set(adapter, addr->sa_data);
473
474 if (netif_running(netdev)) {
475 netif_device_attach(netdev);
476 netxen_napi_enable(adapter);
477 }
478 return 0;
479 }
480
481 static void netxen_set_multicast_list(struct net_device *dev)
482 {
483 struct netxen_adapter *adapter = netdev_priv(dev);
484
485 adapter->set_multi(dev);
486 }
487
488 static const struct net_device_ops netxen_netdev_ops = {
489 .ndo_open = netxen_nic_open,
490 .ndo_stop = netxen_nic_close,
491 .ndo_start_xmit = netxen_nic_xmit_frame,
492 .ndo_get_stats = netxen_nic_get_stats,
493 .ndo_validate_addr = eth_validate_addr,
494 .ndo_set_multicast_list = netxen_set_multicast_list,
495 .ndo_set_mac_address = netxen_nic_set_mac,
496 .ndo_change_mtu = netxen_nic_change_mtu,
497 .ndo_tx_timeout = netxen_tx_timeout,
498 #ifdef CONFIG_NET_POLL_CONTROLLER
499 .ndo_poll_controller = netxen_nic_poll_controller,
500 #endif
501 };
502
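/*
 * Interrupt setup policy: try MSI-X first (one SDS ring per vector when
 * RSS is supported), fall back to single-vector MSI, then to legacy INTx
 * using the per-function target status/mask registers.
 */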
503 static void
504 netxen_setup_intr(struct netxen_adapter *adapter)
505 {
506 struct netxen_legacy_intr_set *legacy_intrp;
507 struct pci_dev *pdev = adapter->pdev;
508 int err, num_msix;
509
510 if (adapter->rss_supported) {
511 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
512 MSIX_ENTRIES_PER_ADAPTER : 2;
513 } else
514 num_msix = 1;
515
516 adapter->max_sds_rings = 1;
517
518 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
519
520 if (adapter->ahw.revision_id >= NX_P3_B0)
521 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
522 else
523 legacy_intrp = &legacy_intr[0];
524
525 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
526 adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
527 legacy_intrp->tgt_status_reg);
528 adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
529 legacy_intrp->tgt_mask_reg);
530 adapter->pci_int_reg = netxen_get_ioaddr(adapter,
531 legacy_intrp->pci_int_reg);
532 adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
533
534 if (adapter->ahw.revision_id >= NX_P3_B1)
535 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
536 ISR_INT_STATE_REG);
537 else
538 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
539 CRB_INT_VECTOR);
540
541 netxen_set_msix_bit(pdev, 0);
542
543 if (adapter->msix_supported) {
544
545 netxen_init_msix_entries(adapter, num_msix);
546 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
547 if (err == 0) {
548 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
549 netxen_set_msix_bit(pdev, 1);
550
551 if (adapter->rss_supported)
552 adapter->max_sds_rings = num_msix;
553
554 dev_info(&pdev->dev, "using msi-x interrupts\n");
555 return;
556 }
557
558 if (err > 0)
559 pci_disable_msix(pdev);
560
561 /* fall through for msi */
562 }
563
564 if (use_msi && !pci_enable_msi(pdev)) {
565 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
566 adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
567 msi_tgt_status[adapter->ahw.pci_func]);
568 dev_info(&pdev->dev, "using msi interrupts\n");
569 adapter->msix_entries[0].vector = pdev->irq;
570 return;
571 }
572
573 dev_info(&pdev->dev, "using legacy interrupts\n");
574 adapter->msix_entries[0].vector = pdev->irq;
575 }
576
577 static void
578 netxen_teardown_intr(struct netxen_adapter *adapter)
579 {
580 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
581 pci_disable_msix(adapter->pdev);
582 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
583 pci_disable_msi(adapter->pdev);
584 }
585
586 static void
587 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
588 {
589 if (adapter->ahw.db_base != NULL)
590 iounmap(adapter->ahw.db_base);
591 if (adapter->ahw.pci_base0 != NULL)
592 iounmap(adapter->ahw.pci_base0);
593 if (adapter->ahw.pci_base1 != NULL)
594 iounmap(adapter->ahw.pci_base1);
595 if (adapter->ahw.pci_base2 != NULL)
596 iounmap(adapter->ahw.pci_base2);
597 }
598
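/*
 * Map BAR 0, whose size depends on the part: the 128MB and 32MB layouts
 * are split into page groups, while newer parts expose a single 2MB BAR.
 * The doorbell region (BAR 4) is only mapped on pre-P3 revisions.
 */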
599 static int
600 netxen_setup_pci_map(struct netxen_adapter *adapter)
601 {
602 void __iomem *db_ptr = NULL;
603
604 resource_size_t mem_base, db_base;
605 unsigned long mem_len, db_len = 0;
606
607 struct pci_dev *pdev = adapter->pdev;
608 int pci_func = adapter->ahw.pci_func;
609 struct netxen_hardware_context *ahw = &adapter->ahw;
610
611 int err = 0;
612
613 /*
614 * Set the CRB window to invalid. If any register in window 0 is
615 * accessed it should set the window to 0 and then reset it to 1.
616 */
617 adapter->ahw.crb_win = -1;
618 adapter->ahw.ocm_win = -1;
619
620 /* remap phys address */
621 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
622 mem_len = pci_resource_len(pdev, 0);
623
624 /* 128 Meg of memory */
625 if (mem_len == NETXEN_PCI_128MB_SIZE) {
626
627 ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
628 ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
629 SECOND_PAGE_GROUP_SIZE);
630 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
631 THIRD_PAGE_GROUP_SIZE);
632 if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
633 ahw->pci_base2 == NULL) {
634 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
635 err = -EIO;
636 goto err_out;
637 }
638
639 ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
640
641 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
642
643 ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
644 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
645 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
646 if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
647 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
648 err = -EIO;
649 goto err_out;
650 }
651
652 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
653
654 ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
655 if (ahw->pci_base0 == NULL) {
656 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
657 return -EIO;
658 }
659 ahw->pci_len0 = mem_len;
660 } else {
661 return -EIO;
662 }
663
664 netxen_setup_hwops(adapter);
665
666 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
667
668 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
669 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
670 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
671
672 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
673 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
674 NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
675 }
676
677 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
678 goto skip_doorbell;
679
680 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
681 db_len = pci_resource_len(pdev, 4);
682
683 if (db_len == 0) {
684 printk(KERN_ERR "%s: doorbell is disabled\n",
685 netxen_nic_driver_name);
686 err = -EIO;
687 goto err_out;
688 }
689
690 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
691 if (!db_ptr) {
692 printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
693 netxen_nic_driver_name);
694 err = -EIO;
695 goto err_out;
696 }
697
698 skip_doorbell:
699 adapter->ahw.db_base = db_ptr;
700 adapter->ahw.db_len = db_len;
701 return 0;
702
703 err_out:
704 netxen_cleanup_pci_map(adapter);
705 return err;
706 }
707
708 static void
709 netxen_check_options(struct netxen_adapter *adapter)
710 {
711 u32 fw_major, fw_minor, fw_build;
712 char brd_name[NETXEN_MAX_SHORT_NAME];
713 char serial_num[32];
714 int i, offset, val;
715 int *ptr32;
716 struct pci_dev *pdev = adapter->pdev;
717
718 adapter->driver_mismatch = 0;
719
720 ptr32 = (int *)&serial_num;
721 offset = NX_FW_SERIAL_NUM_OFFSET;
722 for (i = 0; i < 8; i++) {
723 if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
724 dev_err(&pdev->dev, "error reading board info\n");
725 adapter->driver_mismatch = 1;
726 return;
727 }
728 ptr32[i] = cpu_to_le32(val);
729 offset += sizeof(u32);
730 }
731
732 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
733 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
734 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
735
736 adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
737
738 if (adapter->portnum == 0) {
739 get_brd_name_by_type(adapter->ahw.board_type, brd_name);
740
741 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
742 module_name(THIS_MODULE),
743 brd_name, serial_num, adapter->ahw.revision_id);
744 }
745
746 if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
747 adapter->driver_mismatch = 1;
748 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
749 fw_major, fw_minor, fw_build);
750 return;
751 }
752
753 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
754 i = NXRD32(adapter, NETXEN_SRE_MISC);
755 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
756 }
757
758 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
759 fw_major, fw_minor, fw_build,
760 adapter->ahw.cut_through ? "cut-through" : "legacy");
761
762 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
763 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
764
765 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
766 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
767 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
768 } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
769 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
770 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
771 }
772
773 adapter->msix_supported = 0;
774 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
775 adapter->msix_supported = !!use_msi_x;
776 adapter->rss_supported = !!use_msi_x;
777 } else {
778 u32 flashed_ver = 0;
779 netxen_rom_fast_read(adapter,
780 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
781 flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
782
783 if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
784 switch (adapter->ahw.board_type) {
785 case NETXEN_BRDTYPE_P2_SB31_10G:
786 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
787 adapter->msix_supported = !!use_msi_x;
788 adapter->rss_supported = !!use_msi_x;
789 break;
790 default:
791 break;
792 }
793 }
794 }
795
796 adapter->num_txd = MAX_CMD_DESCRIPTORS;
797
798 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
799 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
800 adapter->max_rds_rings = 3;
801 } else {
802 adapter->num_lro_rxd = 0;
803 adapter->max_rds_rings = 2;
804 }
805 }
806
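/*
 * Bring up firmware: only the function allowed to do so by
 * netxen_can_start_firmware() performs the hardware checks and loads the
 * image; all others skip straight to the netxen_phantom_init() handshake
 * and then refresh options/capabilities from the running firmware.
 */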
807 static int
808 netxen_start_firmware(struct netxen_adapter *adapter)
809 {
810 int val, err, first_boot;
811 struct pci_dev *pdev = adapter->pdev;
812
813 /* required for NX2031 dummy dma */
814 err = nx_set_dma_mask(adapter);
815 if (err)
816 return err;
817
818 if (!netxen_can_start_firmware(adapter))
819 goto wait_init;
820
821 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
822
823 err = netxen_check_hw_init(adapter, first_boot);
824 if (err) {
825 dev_err(&pdev->dev, "error in HW init sequence\n");
826 return err;
827 }
828
829 netxen_request_firmware(adapter);
830
831 err = netxen_need_fw_reset(adapter);
832 if (err < 0)
833 goto err_out;
834 if (err == 0)
835 goto wait_init;
836
837 if (first_boot != 0x55555555) {
838 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
839 netxen_pinit_from_rom(adapter);
840 msleep(1);
841 }
842
843 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
844 NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
845 NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
846
847 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
848 netxen_set_port_mode(adapter);
849
850 err = netxen_load_firmware(adapter);
851 if (err)
852 goto err_out;
853
854 netxen_release_firmware(adapter);
855
856 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
857
858 /* Initialize multicast addr pool owners */
859 val = 0x7654;
860 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
861 val |= 0x0f000000;
862 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
863
864 }
865
866 err = netxen_init_dummy_dma(adapter);
867 if (err)
868 goto err_out;
869
870 /*
871 * Tell the hardware our version number.
872 */
873 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
874 | ((_NETXEN_NIC_LINUX_MINOR << 8))
875 | (_NETXEN_NIC_LINUX_SUBVERSION);
876 NXWR32(adapter, CRB_DRIVER_VERSION, val);
877
878 wait_init:
879 /* Handshake with the card before we register the devices. */
880 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
881 if (err) {
882 netxen_free_dummy_dma(adapter);
883 goto err_out;
884 }
885
886 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
887
888 nx_update_dma_mask(adapter);
889
890 netxen_check_options(adapter);
891
892 adapter->need_fw_reset = 0;
893
894 /* fall through and release firmware */
895
896 err_out:
897 netxen_release_firmware(adapter);
898 return err;
899 }
900
901 static int
902 netxen_nic_request_irq(struct netxen_adapter *adapter)
903 {
904 irq_handler_t handler;
905 struct nx_host_sds_ring *sds_ring;
906 int err, ring;
907
908 unsigned long flags = IRQF_SAMPLE_RANDOM;
909 struct net_device *netdev = adapter->netdev;
910 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
911
912 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
913 handler = netxen_msix_intr;
914 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
915 handler = netxen_msi_intr;
916 else {
917 flags |= IRQF_SHARED;
918 handler = netxen_intr;
919 }
920 adapter->irq = netdev->irq;
921
922 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
923 sds_ring = &recv_ctx->sds_rings[ring];
924 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
925 err = request_irq(sds_ring->irq, handler,
926 flags, sds_ring->name, sds_ring);
927 if (err)
928 return err;
929 }
930
931 return 0;
932 }
933
934 static void
935 netxen_nic_free_irq(struct netxen_adapter *adapter)
936 {
937 int ring;
938 struct nx_host_sds_ring *sds_ring;
939
940 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
941
942 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
943 sds_ring = &recv_ctx->sds_rings[ring];
944 free_irq(sds_ring->irq, sds_ring);
945 }
946 }
947
948 static void
949 netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
950 {
951 adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
952 adapter->coal.normal.data.rx_time_us =
953 NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
954 adapter->coal.normal.data.rx_packets =
955 NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
956 adapter->coal.normal.data.tx_time_us =
957 NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
958 adapter->coal.normal.data.tx_packets =
959 NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
960 }
961
962 /* with rtnl_lock */
963 static int
964 __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
965 {
966 int err;
967
968 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
969 return -EIO;
970
971 err = adapter->init_port(adapter, adapter->physical_port);
972 if (err) {
973 printk(KERN_ERR "%s: Failed to initialize port %d\n",
974 netxen_nic_driver_name, adapter->portnum);
975 return err;
976 }
977 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
978 adapter->macaddr_set(adapter, adapter->mac_addr);
979
980 adapter->set_multi(netdev);
981 adapter->set_mtu(adapter, netdev->mtu);
982
983 adapter->ahw.linkup = 0;
984
985 if (adapter->max_sds_rings > 1)
986 netxen_config_rss(adapter, 1);
987
988 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
989 netxen_config_intr_coalesce(adapter);
990
991 if (netdev->features & NETIF_F_LRO)
992 netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
993
994 netxen_napi_enable(adapter);
995
996 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
997 netxen_linkevent_request(adapter, 1);
998 else
999 netxen_nic_set_link_parameters(adapter);
1000
1001 set_bit(__NX_DEV_UP, &adapter->state);
1002 return 0;
1003 }
1004
1005 /* Used during resume and by the firmware recovery path. */
1006
1007 static inline int
1008 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
1009 {
1010 int err = 0;
1011
1012 rtnl_lock();
1013 if (netif_running(netdev))
1014 err = __netxen_nic_up(adapter, netdev);
1015 rtnl_unlock();
1016
1017 return err;
1018 }
1019
1020 /* with rtnl_lock */
1021 static void
1022 __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1023 {
1024 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1025 return;
1026
1027 if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
1028 return;
1029
1030 smp_mb();
1031 spin_lock(&adapter->tx_clean_lock);
1032 netif_carrier_off(netdev);
1033 netif_tx_disable(netdev);
1034
1035 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
1036 netxen_linkevent_request(adapter, 0);
1037
1038 if (adapter->stop_port)
1039 adapter->stop_port(adapter);
1040
1041 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1042 netxen_p3_free_mac_list(adapter);
1043
1044 adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
1045
1046 netxen_napi_disable(adapter);
1047
1048 netxen_release_tx_buffers(adapter);
1049 spin_unlock(&adapter->tx_clean_lock);
1050 }
1051
1052 /* Used during suspend and by the firmware recovery path. */
1053
1054 static inline void
1055 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1056 {
1057 rtnl_lock();
1058 if (netif_running(netdev))
1059 __netxen_nic_down(adapter, netdev);
1060 rtnl_unlock();
1061
1062 }
1063
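/*
 * Allocate everything needed to pass traffic: NAPI contexts, software and
 * hardware ring resources, initial RX buffers and IRQs. A no-op if the
 * adapter is already up; undone by netxen_nic_detach().
 */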
1064 static int
1065 netxen_nic_attach(struct netxen_adapter *adapter)
1066 {
1067 struct net_device *netdev = adapter->netdev;
1068 struct pci_dev *pdev = adapter->pdev;
1069 int err, ring;
1070 struct nx_host_rds_ring *rds_ring;
1071 struct nx_host_tx_ring *tx_ring;
1072
1073 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1074 return 0;
1075
1076 err = netxen_init_firmware(adapter);
1077 if (err)
1078 return err;
1079
1080 err = netxen_napi_add(adapter, netdev);
1081 if (err)
1082 return err;
1083
1084 err = netxen_alloc_sw_resources(adapter);
1085 if (err) {
1086 printk(KERN_ERR "%s: Failed to allocate sw resources\n",
1087 netdev->name);
1088 return err;
1089 }
1090
1091 err = netxen_alloc_hw_resources(adapter);
1092 if (err) {
1093 printk(KERN_ERR "%s: Failed to allocate hw resources\n",
1094 netdev->name);
1095 goto err_out_free_sw;
1096 }
1097
1098 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1099 tx_ring = adapter->tx_ring;
1100 tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
1101 crb_cmd_producer[adapter->portnum]);
1102 tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
1103 crb_cmd_consumer[adapter->portnum]);
1104
1105 tx_ring->producer = 0;
1106 tx_ring->sw_consumer = 0;
1107
1108 netxen_nic_update_cmd_producer(adapter, tx_ring);
1109 netxen_nic_update_cmd_consumer(adapter, tx_ring);
1110 }
1111
1112 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1113 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1114 netxen_post_rx_buffers(adapter, ring, rds_ring);
1115 }
1116
1117 err = netxen_nic_request_irq(adapter);
1118 if (err) {
1119 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
1120 netdev->name);
1121 goto err_out_free_rxbuf;
1122 }
1123
1124 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1125 netxen_nic_init_coalesce_defaults(adapter);
1126
1127 netxen_create_sysfs_entries(adapter);
1128
1129 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
1130 return 0;
1131
1132 err_out_free_rxbuf:
1133 netxen_release_rx_buffers(adapter);
1134 netxen_free_hw_resources(adapter);
1135 err_out_free_sw:
1136 netxen_free_sw_resources(adapter);
1137 return err;
1138 }
1139
1140 static void
1141 netxen_nic_detach(struct netxen_adapter *adapter)
1142 {
1143 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1144 return;
1145
1146 netxen_remove_sysfs_entries(adapter);
1147
1148 netxen_free_hw_resources(adapter);
1149 netxen_release_rx_buffers(adapter);
1150 netxen_nic_free_irq(adapter);
1151 netxen_napi_del(adapter);
1152 netxen_free_sw_resources(adapter);
1153
1154 adapter->is_up = 0;
1155 }
1156
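/*
 * Recreate the hardware context by tearing the interface down and
 * attaching it again; serialized against other resets via the
 * __NX_RESETTING bit.
 */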
1157 int
1158 netxen_nic_reset_context(struct netxen_adapter *adapter)
1159 {
1160 int err = 0;
1161 struct net_device *netdev = adapter->netdev;
1162
1163 if (test_and_set_bit(__NX_RESETTING, &adapter->state))
1164 return -EBUSY;
1165
1166 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1167
1168 netif_device_detach(netdev);
1169
1170 if (netif_running(netdev))
1171 __netxen_nic_down(adapter, netdev);
1172
1173 netxen_nic_detach(adapter);
1174
1175 if (netif_running(netdev)) {
1176 err = netxen_nic_attach(adapter);
1177 if (!err)
1178 err = __netxen_nic_up(adapter, netdev);
1179
1180 if (err)
1181 goto done;
1182 }
1183
1184 netif_device_attach(netdev);
1185 }
1186
1187 done:
1188 clear_bit(__NX_RESETTING, &adapter->state);
1189 return err;
1190 }
1191
1192 static int
1193 netxen_setup_netdev(struct netxen_adapter *adapter,
1194 struct net_device *netdev)
1195 {
1196 int err = 0;
1197 struct pci_dev *pdev = adapter->pdev;
1198
1199 adapter->rx_csum = 1;
1200 adapter->mc_enabled = 0;
1201 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1202 adapter->max_mc_count = 38;
1203 else
1204 adapter->max_mc_count = 16;
1205
1206 netdev->netdev_ops = &netxen_netdev_ops;
1207 netdev->watchdog_timeo = 5*HZ;
1208
1209 netxen_nic_change_mtu(netdev, netdev->mtu);
1210
1211 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1212
1213 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1214 netdev->features |= (NETIF_F_GRO);
1215 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1216
1217 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1218 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1219 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1220 }
1221
1222 if (adapter->pci_using_dac) {
1223 netdev->features |= NETIF_F_HIGHDMA;
1224 netdev->vlan_features |= NETIF_F_HIGHDMA;
1225 }
1226
1227 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
1228 netdev->features |= (NETIF_F_HW_VLAN_TX);
1229
1230 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
1231 netdev->features |= NETIF_F_LRO;
1232
1233 netdev->irq = adapter->msix_entries[0].vector;
1234
1235 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
1236
1237 if (netxen_read_mac_addr(adapter))
1238 dev_warn(&pdev->dev, "failed to read mac addr\n");
1239
1240 netif_carrier_off(netdev);
1241
1242 err = register_netdev(netdev);
1243 if (err) {
1244 dev_err(&pdev->dev, "failed to register net device\n");
1245 return err;
1246 }
1247
1248 return 0;
1249 }
1250
1251 #ifdef CONFIG_PCIEAER
1252 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
1253 {
1254 struct pci_dev *pdev = adapter->pdev;
1255 struct pci_dev *root = pdev->bus->self;
1256 u32 aer_pos;
1257
1258 if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
1259 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
1260 return;
1261
1262 if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT)
1263 return;
1264
1265 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
1266 if (!aer_pos)
1267 return;
1268
1269 pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
1270 }
1271 #endif
1272
1273 static int __devinit
1274 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1275 {
1276 struct net_device *netdev = NULL;
1277 struct netxen_adapter *adapter = NULL;
1278 int i = 0, err;
1279 int pci_func_id = PCI_FUNC(pdev->devfn);
1280 uint8_t revision_id;
1281 u32 val;
1282
1283 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1284 pr_warning("%s: chip revisions between 0x%x-0x%x "
1285 "will not be enabled.\n",
1286 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1287 return -ENODEV;
1288 }
1289
1290 if ((err = pci_enable_device(pdev)))
1291 return err;
1292
1293 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1294 err = -ENODEV;
1295 goto err_out_disable_pdev;
1296 }
1297
1298 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1299 goto err_out_disable_pdev;
1300
1301 if (NX_IS_REVISION_P3(pdev->revision))
1302 pci_enable_pcie_error_reporting(pdev);
1303
1304 pci_set_master(pdev);
1305
1306 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1307 if (!netdev) {
1308 dev_err(&pdev->dev, "failed to allocate net_device\n");
1309 err = -ENOMEM;
1310 goto err_out_free_res;
1311 }
1312
1313 SET_NETDEV_DEV(netdev, &pdev->dev);
1314
1315 adapter = netdev_priv(netdev);
1316 adapter->netdev = netdev;
1317 adapter->pdev = pdev;
1318 adapter->ahw.pci_func = pci_func_id;
1319
1320 revision_id = pdev->revision;
1321 adapter->ahw.revision_id = revision_id;
1322
1323 rwlock_init(&adapter->ahw.crb_lock);
1324 spin_lock_init(&adapter->ahw.mem_lock);
1325
1326 spin_lock_init(&adapter->tx_clean_lock);
1327 INIT_LIST_HEAD(&adapter->mac_list);
1328
1329 err = netxen_setup_pci_map(adapter);
1330 if (err)
1331 goto err_out_free_netdev;
1332
1333 /* This will be reset for mezz cards */
1334 adapter->portnum = pci_func_id;
1335
1336 err = netxen_nic_get_board_info(adapter);
1337 if (err) {
1338 dev_err(&pdev->dev, "Error getting board config info.\n");
1339 goto err_out_iounmap;
1340 }
1341
1342 #ifdef CONFIG_PCIEAER
1343 netxen_mask_aer_correctable(adapter);
1344 #endif
1345
1346 /* Mezz cards have PCI function 0,2,3 enabled */
1347 switch (adapter->ahw.board_type) {
1348 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1349 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1350 if (pci_func_id >= 2)
1351 adapter->portnum = pci_func_id - 2;
1352 break;
1353 default:
1354 break;
1355 }
1356
1357 if (adapter->portnum == 0) {
1358 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
1359 if (val != 0xffffffff && val != 0) {
1360 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1361 adapter->need_fw_reset = 1;
1362 }
1363 }
1364
1365 err = netxen_start_firmware(adapter);
1366 if (err)
1367 goto err_out_decr_ref;
1368
1369 /*
1370 * See if the firmware gave us a virtual-physical port mapping.
1371 */
1372 adapter->physical_port = adapter->portnum;
1373 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1374 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1375 if (i != 0x55555555)
1376 adapter->physical_port = i;
1377 }
1378
1379 netxen_nic_clear_stats(adapter);
1380
1381 netxen_setup_intr(adapter);
1382
1383 err = netxen_setup_netdev(adapter, netdev);
1384 if (err)
1385 goto err_out_disable_msi;
1386
1387 pci_set_drvdata(pdev, adapter);
1388
1389 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1390
1391 switch (adapter->ahw.port_type) {
1392 case NETXEN_NIC_GBE:
1393 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1394 adapter->netdev->name);
1395 break;
1396 case NETXEN_NIC_XGBE:
1397 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1398 adapter->netdev->name);
1399 break;
1400 }
1401
1402 netxen_create_diag_entries(adapter);
1403
1404 return 0;
1405
1406 err_out_disable_msi:
1407 netxen_teardown_intr(adapter);
1408
1409 netxen_free_dummy_dma(adapter);
1410
1411 err_out_decr_ref:
1412 nx_decr_dev_ref_cnt(adapter);
1413
1414 err_out_iounmap:
1415 netxen_cleanup_pci_map(adapter);
1416
1417 err_out_free_netdev:
1418 free_netdev(netdev);
1419
1420 err_out_free_res:
1421 pci_release_regions(pdev);
1422
1423 err_out_disable_pdev:
1424 pci_set_drvdata(pdev, NULL);
1425 pci_disable_device(pdev);
1426 return err;
1427 }
1428
1429 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1430 {
1431 struct netxen_adapter *adapter;
1432 struct net_device *netdev;
1433
1434 adapter = pci_get_drvdata(pdev);
1435 if (adapter == NULL)
1436 return;
1437
1438 netdev = adapter->netdev;
1439
1440 netxen_cancel_fw_work(adapter);
1441
1442 unregister_netdev(netdev);
1443
1444 cancel_work_sync(&adapter->tx_timeout_task);
1445
1446 netxen_nic_detach(adapter);
1447
1448 nx_decr_dev_ref_cnt(adapter);
1449
1450 if (adapter->portnum == 0)
1451 netxen_free_dummy_dma(adapter);
1452
1453 clear_bit(__NX_RESETTING, &adapter->state);
1454
1455 netxen_teardown_intr(adapter);
1456
1457 netxen_remove_diag_entries(adapter);
1458
1459 netxen_cleanup_pci_map(adapter);
1460
1461 netxen_release_firmware(adapter);
1462
1463 if (NX_IS_REVISION_P3(pdev->revision))
1464 pci_disable_pcie_error_reporting(pdev);
1465
1466 pci_release_regions(pdev);
1467 pci_disable_device(pdev);
1468 pci_set_drvdata(pdev, NULL);
1469
1470 free_netdev(netdev);
1471 }
1472
1473 static void netxen_nic_detach_func(struct netxen_adapter *adapter)
1474 {
1475 struct net_device *netdev = adapter->netdev;
1476
1477 netif_device_detach(netdev);
1478
1479 netxen_cancel_fw_work(adapter);
1480
1481 if (netif_running(netdev))
1482 netxen_nic_down(adapter, netdev);
1483
1484 cancel_work_sync(&adapter->tx_timeout_task);
1485
1486 netxen_nic_detach(adapter);
1487
1488 if (adapter->portnum == 0)
1489 netxen_free_dummy_dma(adapter);
1490
1491 nx_decr_dev_ref_cnt(adapter);
1492
1493 clear_bit(__NX_RESETTING, &adapter->state);
1494 }
1495
1496 static int netxen_nic_attach_func(struct pci_dev *pdev)
1497 {
1498 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1499 struct net_device *netdev = adapter->netdev;
1500 int err;
1501
1502 err = pci_enable_device(pdev);
1503 if (err)
1504 return err;
1505
1506 pci_set_power_state(pdev, PCI_D0);
1507 pci_set_master(pdev);
1508 pci_restore_state(pdev);
1509
1510 adapter->ahw.crb_win = -1;
1511 adapter->ahw.ocm_win = -1;
1512
1513 err = netxen_start_firmware(adapter);
1514 if (err) {
1515 dev_err(&pdev->dev, "failed to start firmware\n");
1516 return err;
1517 }
1518
1519 if (netif_running(netdev)) {
1520 err = netxen_nic_attach(adapter);
1521 if (err)
1522 goto err_out;
1523
1524 err = netxen_nic_up(adapter, netdev);
1525 if (err)
1526 goto err_out_detach;
1527
1528 netxen_config_indev_addr(netdev, NETDEV_UP);
1529 }
1530
1531 netif_device_attach(netdev);
1532 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1533 return 0;
1534
1535 err_out_detach:
1536 netxen_nic_detach(adapter);
1537 err_out:
1538 nx_decr_dev_ref_cnt(adapter);
1539 return err;
1540 }
1541
1542 static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
1543 pci_channel_state_t state)
1544 {
1545 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1546
1547 if (state == pci_channel_io_perm_failure)
1548 return PCI_ERS_RESULT_DISCONNECT;
1549
1550 if (nx_dev_request_aer(adapter))
1551 return PCI_ERS_RESULT_RECOVERED;
1552
1553 netxen_nic_detach_func(adapter);
1554
1555 pci_disable_device(pdev);
1556
1557 return PCI_ERS_RESULT_NEED_RESET;
1558 }
1559
1560 static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
1561 {
1562 int err = 0;
1563
1564 err = netxen_nic_attach_func(pdev);
1565
1566 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1567 }
1568
1569 static void netxen_io_resume(struct pci_dev *pdev)
1570 {
1571 pci_cleanup_aer_uncorrect_error_status(pdev);
1572 }
1573
1574 static void netxen_nic_shutdown(struct pci_dev *pdev)
1575 {
1576 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1577
1578 netxen_nic_detach_func(adapter);
1579
1580 if (pci_save_state(pdev))
1581 return;
1582
1583 if (netxen_nic_wol_supported(adapter)) {
1584 pci_enable_wake(pdev, PCI_D3cold, 1);
1585 pci_enable_wake(pdev, PCI_D3hot, 1);
1586 }
1587
1588 pci_disable_device(pdev);
1589 }
1590
1591 #ifdef CONFIG_PM
1592 static int
1593 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1594 {
1595 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1596 int retval;
1597
1598 netxen_nic_detach_func(adapter);
1599
1600 retval = pci_save_state(pdev);
1601 if (retval)
1602 return retval;
1603
1604 if (netxen_nic_wol_supported(adapter)) {
1605 pci_enable_wake(pdev, PCI_D3cold, 1);
1606 pci_enable_wake(pdev, PCI_D3hot, 1);
1607 }
1608
1609 pci_disable_device(pdev);
1610 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1611
1612 return 0;
1613 }
1614
1615 static int
1616 netxen_nic_resume(struct pci_dev *pdev)
1617 {
1618 return netxen_nic_attach_func(pdev);
1619 }
1620 #endif
1621
1622 static int netxen_nic_open(struct net_device *netdev)
1623 {
1624 struct netxen_adapter *adapter = netdev_priv(netdev);
1625 int err = 0;
1626
1627 if (adapter->driver_mismatch)
1628 return -EIO;
1629
1630 err = netxen_nic_attach(adapter);
1631 if (err)
1632 return err;
1633
1634 err = __netxen_nic_up(adapter, netdev);
1635 if (err)
1636 goto err_out;
1637
1638 netif_start_queue(netdev);
1639
1640 return 0;
1641
1642 err_out:
1643 netxen_nic_detach(adapter);
1644 return err;
1645 }
1646
1647 /*
1648 * netxen_nic_close - .ndo_stop entry point; brings the interface down
1649 */
1650 static int netxen_nic_close(struct net_device *netdev)
1651 {
1652 struct netxen_adapter *adapter = netdev_priv(netdev);
1653
1654 __netxen_nic_down(adapter, netdev);
1655 return 0;
1656 }
1657
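/*
 * Pick the TX opcode (plain, checksum offload or LSO) for this skb and,
 * for LSO, copy the MAC/IP/TCP headers into the command ring as the
 * firmware expects, inserting a VLAN header template when the tag is
 * out of band.
 */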
1658 static void
1659 netxen_tso_check(struct net_device *netdev,
1660 struct nx_host_tx_ring *tx_ring,
1661 struct cmd_desc_type0 *first_desc,
1662 struct sk_buff *skb)
1663 {
1664 u8 opcode = TX_ETHER_PKT;
1665 __be16 protocol = skb->protocol;
1666 u16 flags = 0, vid = 0;
1667 u32 producer;
1668 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1669 struct cmd_desc_type0 *hwdesc;
1670 struct vlan_ethhdr *vh;
1671
1672 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1673
1674 vh = (struct vlan_ethhdr *)skb->data;
1675 protocol = vh->h_vlan_encapsulated_proto;
1676 flags = FLAGS_VLAN_TAGGED;
1677
1678 } else if (vlan_tx_tag_present(skb)) {
1679
1680 flags = FLAGS_VLAN_OOB;
1681 vid = vlan_tx_tag_get(skb);
1682 netxen_set_tx_vlan_tci(first_desc, vid);
1683 vlan_oob = 1;
1684 }
1685
1686 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1687 skb_shinfo(skb)->gso_size > 0) {
1688
1689 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1690
1691 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1692 first_desc->total_hdr_length = hdr_len;
1693 if (vlan_oob) {
1694 first_desc->total_hdr_length += VLAN_HLEN;
1695 first_desc->tcp_hdr_offset = VLAN_HLEN;
1696 first_desc->ip_hdr_offset = VLAN_HLEN;
1697 /* Only in case of TSO on vlan device */
1698 flags |= FLAGS_VLAN_TAGGED;
1699 }
1700
1701 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1702 TX_TCP_LSO6 : TX_TCP_LSO;
1703 tso = 1;
1704
1705 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1706 u8 l4proto;
1707
1708 if (protocol == cpu_to_be16(ETH_P_IP)) {
1709 l4proto = ip_hdr(skb)->protocol;
1710
1711 if (l4proto == IPPROTO_TCP)
1712 opcode = TX_TCP_PKT;
1713 else if(l4proto == IPPROTO_UDP)
1714 opcode = TX_UDP_PKT;
1715 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1716 l4proto = ipv6_hdr(skb)->nexthdr;
1717
1718 if (l4proto == IPPROTO_TCP)
1719 opcode = TX_TCPV6_PKT;
1720 else if(l4proto == IPPROTO_UDP)
1721 opcode = TX_UDPV6_PKT;
1722 }
1723 }
1724
1725 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1726 first_desc->ip_hdr_offset += skb_network_offset(skb);
1727 netxen_set_tx_flags_opcode(first_desc, flags, opcode);
1728
1729 if (!tso)
1730 return;
1731
1732 /* For LSO, we need to copy the MAC/IP/TCP headers into
1733 * the descriptor ring
1734 */
1735 producer = tx_ring->producer;
1736 copied = 0;
1737 offset = 2;
1738
1739 if (vlan_oob) {
1740 /* Create a TSO vlan header template for firmware */
1741
1742 hwdesc = &tx_ring->desc_head[producer];
1743 tx_ring->cmd_buf_arr[producer].skb = NULL;
1744
1745 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1746 hdr_len + VLAN_HLEN);
1747
1748 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1749 skb_copy_from_linear_data(skb, vh, 12);
1750 vh->h_vlan_proto = htons(ETH_P_8021Q);
1751 vh->h_vlan_TCI = htons(vid);
1752 skb_copy_from_linear_data_offset(skb, 12,
1753 (char *)vh + 16, copy_len - 16);
1754
1755 copied = copy_len - VLAN_HLEN;
1756 offset = 0;
1757
1758 producer = get_next_index(producer, tx_ring->num_desc);
1759 }
1760
1761 while (copied < hdr_len) {
1762
1763 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1764 (hdr_len - copied));
1765
1766 hwdesc = &tx_ring->desc_head[producer];
1767 tx_ring->cmd_buf_arr[producer].skb = NULL;
1768
1769 skb_copy_from_linear_data_offset(skb, copied,
1770 (char *)hwdesc + offset, copy_len);
1771
1772 copied += copy_len;
1773 offset = 0;
1774
1775 producer = get_next_index(producer, tx_ring->num_desc);
1776 }
1777
1778 tx_ring->producer = producer;
1779 barrier();
1780 }
1781
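/*
 * DMA-map the skb head and each page fragment into pbuf->frag_array,
 * unwinding all prior mappings if any map fails.
 */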
1782 static int
1783 netxen_map_tx_skb(struct pci_dev *pdev,
1784 struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
1785 {
1786 struct netxen_skb_frag *nf;
1787 struct skb_frag_struct *frag;
1788 int i, nr_frags;
1789 dma_addr_t map;
1790
1791 nr_frags = skb_shinfo(skb)->nr_frags;
1792 nf = &pbuf->frag_array[0];
1793
1794 map = pci_map_single(pdev, skb->data,
1795 skb_headlen(skb), PCI_DMA_TODEVICE);
1796 if (pci_dma_mapping_error(pdev, map))
1797 goto out_err;
1798
1799 nf->dma = map;
1800 nf->length = skb_headlen(skb);
1801
1802 for (i = 0; i < nr_frags; i++) {
1803 frag = &skb_shinfo(skb)->frags[i];
1804 nf = &pbuf->frag_array[i+1];
1805
1806 map = pci_map_page(pdev, frag->page, frag->page_offset,
1807 frag->size, PCI_DMA_TODEVICE);
1808 if (pci_dma_mapping_error(pdev, map))
1809 goto unwind;
1810
1811 nf->dma = map;
1812 nf->length = frag->size;
1813 }
1814
1815 return 0;
1816
1817 unwind:
1818 while (--i >= 0) {
1819 nf = &pbuf->frag_array[i+1];
1820 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1821 }
1822
1823 nf = &pbuf->frag_array[0];
1824 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1825
1826 out_err:
1827 return -ENOMEM;
1828 }
1829
1830 static inline void
1831 netxen_clear_cmddesc(u64 *desc)
1832 {
1833 desc[0] = 0ULL;
1834 desc[2] = 0ULL;
1835 }
1836
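/*
 * Hot TX path: map the skb, pack up to four buffer addresses into each
 * command descriptor, then kick firmware by advancing the producer
 * index. Non-TSO skbs with too many fragments are partially linearized
 * first, and the queue is stopped when the ring runs low on descriptors.
 */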
1837 static netdev_tx_t
1838 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1839 {
1840 struct netxen_adapter *adapter = netdev_priv(netdev);
1841 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1842 struct netxen_cmd_buffer *pbuf;
1843 struct netxen_skb_frag *buffrag;
1844 struct cmd_desc_type0 *hwdesc, *first_desc;
1845 struct pci_dev *pdev;
1846 int i, k;
1847 int delta = 0;
1848 struct skb_frag_struct *frag;
1849
1850 u32 producer;
1851 int frag_count, no_of_desc;
1852 u32 num_txd = tx_ring->num_desc;
1853
1854 frag_count = skb_shinfo(skb)->nr_frags + 1;
1855
1856 /* 14 frags supported for normal packet and
1857 * 32 frags supported for TSO packet
1858 */
1859 if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
1860
1861 for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
1862 frag = &skb_shinfo(skb)->frags[i];
1863 delta += frag->size;
1864 }
1865
1866 if (!__pskb_pull_tail(skb, delta))
1867 goto drop_packet;
1868
1869 frag_count = 1 + skb_shinfo(skb)->nr_frags;
1870 }
1871 /* 4 fragments per cmd descriptor */
1872 no_of_desc = (frag_count + 3) >> 2;
1873
1874 if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1875 netif_stop_queue(netdev);
1876 smp_mb();
1877 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
1878 netif_start_queue(netdev);
1879 else
1880 return NETDEV_TX_BUSY;
1881 }
1882
1883 producer = tx_ring->producer;
1884 pbuf = &tx_ring->cmd_buf_arr[producer];
1885
1886 pdev = adapter->pdev;
1887
1888 if (netxen_map_tx_skb(pdev, skb, pbuf))
1889 goto drop_packet;
1890
1891 pbuf->skb = skb;
1892 pbuf->frag_count = frag_count;
1893
1894 first_desc = hwdesc = &tx_ring->desc_head[producer];
1895 netxen_clear_cmddesc((u64 *)hwdesc);
1896
1897 netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
1898 netxen_set_tx_port(first_desc, adapter->portnum);
1899
1900 for (i = 0; i < frag_count; i++) {
1901
1902 k = i % 4;
1903
1904 if ((k == 0) && (i > 0)) {
1905 /* move to next desc.*/
1906 producer = get_next_index(producer, num_txd);
1907 hwdesc = &tx_ring->desc_head[producer];
1908 netxen_clear_cmddesc((u64 *)hwdesc);
1909 tx_ring->cmd_buf_arr[producer].skb = NULL;
1910 }
1911
1912 buffrag = &pbuf->frag_array[i];
1913
1914 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1915 switch (k) {
1916 case 0:
1917 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1918 break;
1919 case 1:
1920 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1921 break;
1922 case 2:
1923 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1924 break;
1925 case 3:
1926 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1927 break;
1928 }
1929 }
1930
1931 tx_ring->producer = get_next_index(producer, num_txd);
1932
1933 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1934
1935 netxen_nic_update_cmd_producer(adapter, tx_ring);
1936
1937 adapter->stats.txbytes += skb->len;
1938 adapter->stats.xmitcalled++;
1939
1940 return NETDEV_TX_OK;
1941
1942 drop_packet:
1943 adapter->stats.txdropped++;
1944 dev_kfree_skb_any(skb);
1945 return NETDEV_TX_OK;
1946 }
1947
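/*
 * Read the temperature state reported by firmware and log transitions;
 * returns nonzero on a panic-level reading (treated as fatal).
 */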
1948 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1949 {
1950 struct net_device *netdev = adapter->netdev;
1951 uint32_t temp, temp_state, temp_val;
1952 int rv = 0;
1953
1954 temp = NXRD32(adapter, CRB_TEMP_STATE);
1955
1956 temp_state = nx_get_temp_state(temp);
1957 temp_val = nx_get_temp_val(temp);
1958
1959 if (temp_state == NX_TEMP_PANIC) {
1960 printk(KERN_ALERT
1961 "%s: Device temperature %d degrees C exceeds"
1962 " maximum allowed. Hardware has been shut down.\n",
1963 netdev->name, temp_val);
1964 rv = 1;
1965 } else if (temp_state == NX_TEMP_WARN) {
1966 if (adapter->temp == NX_TEMP_NORMAL) {
1967 printk(KERN_ALERT
1968 "%s: Device temperature %d degrees C "
1969 "exceeds operating range."
1970 " Immediate action needed.\n",
1971 netdev->name, temp_val);
1972 }
1973 } else {
1974 if (adapter->temp == NX_TEMP_WARN) {
1975 printk(KERN_INFO
1976 "%s: Device temperature is now %d degrees C"
1977 " in normal range.\n", netdev->name,
1978 temp_val);
1979 }
1980 }
1981 adapter->temp = temp_state;
1982 return rv;
1983 }
1984
1985 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1986 {
1987 struct net_device *netdev = adapter->netdev;
1988
1989 if (adapter->ahw.linkup && !linkup) {
1990 printk(KERN_INFO "%s: %s NIC Link is down\n",
1991 netxen_nic_driver_name, netdev->name);
1992 adapter->ahw.linkup = 0;
1993 if (netif_running(netdev)) {
1994 netif_carrier_off(netdev);
1995 netif_stop_queue(netdev);
1996 }
1997 adapter->link_changed = !adapter->has_link_events;
1998 } else if (!adapter->ahw.linkup && linkup) {
1999 printk(KERN_INFO "%s: %s NIC Link is up\n",
2000 netxen_nic_driver_name, netdev->name);
2001 adapter->ahw.linkup = 1;
2002 if (netif_running(netdev)) {
2003 netif_carrier_on(netdev);
2004 netif_wake_queue(netdev);
2005 }
2006 adapter->link_changed = !adapter->has_link_events;
2007 }
2008 }
2009
2010 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
2011 {
2012 u32 val, port, linkup;
2013
2014 port = adapter->physical_port;
2015
2016 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
2017 val = NXRD32(adapter, CRB_XG_STATE_P3);
2018 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
2019 linkup = (val == XG_LINK_UP_P3);
2020 } else {
2021 val = NXRD32(adapter, CRB_XG_STATE);
2022 val = (val >> port*8) & 0xff;
2023 linkup = (val == XG_LINK_UP);
2024 }
2025
2026 netxen_advert_link_change(adapter, linkup);
2027 }
2028
2029 static void netxen_tx_timeout(struct net_device *netdev)
2030 {
2031 struct netxen_adapter *adapter = netdev_priv(netdev);
2032
2033 if (test_bit(__NX_RESETTING, &adapter->state))
2034 return;
2035
2036 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2037 schedule_work(&adapter->tx_timeout_task);
2038 }
2039
2040 static void netxen_tx_timeout_task(struct work_struct *work)
2041 {
2042 struct netxen_adapter *adapter =
2043 container_of(work, struct netxen_adapter, tx_timeout_task);
2044
2045 if (!netif_running(adapter->netdev))
2046 return;
2047
2048 if (test_and_set_bit(__NX_RESETTING, &adapter->state))
2049 return;
2050
2051 if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
2052 goto request_reset;
2053
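/* On P2 hardware, bouncing NAPI is used to try to scrub a
 * stuck interrupt; later revisions get a full context reset
 * instead.
 */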
2054 rtnl_lock();
2055 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
2056 /* try to scrub interrupt */
2057 netxen_napi_disable(adapter);
2058
2059 netxen_napi_enable(adapter);
2060
2061 netif_wake_queue(adapter->netdev);
2062
2063 clear_bit(__NX_RESETTING, &adapter->state);
2064 } else {
2065 clear_bit(__NX_RESETTING, &adapter->state);
2066 if (netxen_nic_reset_context(adapter)) {
2067 rtnl_unlock();
2068 goto request_reset;
2069 }
2070 }
2071 adapter->netdev->trans_start = jiffies;
2072 rtnl_unlock();
2073 return;
2074
2075 request_reset:
2076 adapter->need_fw_reset = 1;
2077 clear_bit(__NX_RESETTING, &adapter->state);
2078 }
2079
2080 static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
2081 {
2082 struct netxen_adapter *adapter = netdev_priv(netdev);
2083 struct net_device_stats *stats = &netdev->stats;
2084
2085 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2086 stats->tx_packets = adapter->stats.xmitfinished;
2087 stats->rx_bytes = adapter->stats.rxbytes;
2088 stats->tx_bytes = adapter->stats.txbytes;
2089 stats->rx_dropped = adapter->stats.rxdropped;
2090 stats->tx_dropped = adapter->stats.txdropped;
2091
2092 return stats;
2093 }
2094
2095 static irqreturn_t netxen_intr(int irq, void *data)
2096 {
2097 struct nx_host_sds_ring *sds_ring = data;
2098 struct netxen_adapter *adapter = sds_ring->adapter;
2099 u32 status = 0;
2100
2101 status = readl(adapter->isr_int_vec);
2102
2103 if (!(status & adapter->int_vec_bit))
2104 return IRQ_NONE;
2105
2106 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
2107 /* double-check the interrupt state machine to confirm the interrupt is ours */
2108 status = readl(adapter->crb_int_state_reg);
2109 if (!ISR_LEGACY_INT_TRIGGERED(status))
2110 return IRQ_NONE;
2111
2112 } else {
2113 unsigned long our_int = 0;
2114
2115 our_int = readl(adapter->crb_int_state_reg);
2116
2117 /* not our interrupt */
2118 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
2119 return IRQ_NONE;
2120
2121 /* claim interrupt */
2122 writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
2123
2124 /* clear interrupt */
2125 netxen_nic_disable_int(sds_ring);
2126 }
2127
2128 writel(0xffffffff, adapter->tgt_status_reg);
2129 /* read twice to ensure write is flushed */
2130 readl(adapter->isr_int_vec);
2131 readl(adapter->isr_int_vec);
2132
2133 napi_schedule(&sds_ring->napi);
2134
2135 return IRQ_HANDLED;
2136 }
2137
2138 static irqreturn_t netxen_msi_intr(int irq, void *data)
2139 {
2140 struct nx_host_sds_ring *sds_ring = data;
2141 struct netxen_adapter *adapter = sds_ring->adapter;
2142
2143 /* clear interrupt */
2144 writel(0xffffffff, adapter->tgt_status_reg);
2145
2146 napi_schedule(&sds_ring->napi);
2147 return IRQ_HANDLED;
2148 }
2149
2150 static irqreturn_t netxen_msix_intr(int irq, void *data)
2151 {
2152 struct nx_host_sds_ring *sds_ring = data;
2153
2154 napi_schedule(&sds_ring->napi);
2155 return IRQ_HANDLED;
2156 }
2157
2158 static int netxen_nic_poll(struct napi_struct *napi, int budget)
2159 {
2160 struct nx_host_sds_ring *sds_ring =
2161 container_of(napi, struct nx_host_sds_ring, napi);
2162
2163 struct netxen_adapter *adapter = sds_ring->adapter;
2164
2165 int tx_complete;
2166 int work_done;
2167
2168 tx_complete = netxen_process_cmd_ring(adapter);
2169
2170 work_done = netxen_process_rcv_ring(sds_ring, budget);
2171
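/* Finish NAPI only when Tx reclaim is done and the Rx budget
 * was not exhausted; re-enable interrupts only if the device
 * is still up.
 */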
2172 if ((work_done < budget) && tx_complete) {
2173 napi_complete(&sds_ring->napi);
2174 if (test_bit(__NX_DEV_UP, &adapter->state))
2175 netxen_nic_enable_int(sds_ring);
2176 }
2177
2178 return work_done;
2179 }
2180
2181 #ifdef CONFIG_NET_POLL_CONTROLLER
2182 static void netxen_nic_poll_controller(struct net_device *netdev)
2183 {
2184 int ring;
2185 struct nx_host_sds_ring *sds_ring;
2186 struct netxen_adapter *adapter = netdev_priv(netdev);
2187 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
2188
2189 disable_irq(adapter->irq);
2190 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2191 sds_ring = &recv_ctx->sds_rings[ring];
2192 netxen_intr(adapter->irq, sds_ring);
2193 }
2194 enable_irq(adapter->irq);
2195 }
2196 #endif
2197
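/* NX_CRB_DEV_REF_COUNT tracks how many PCI functions are
 * currently using the device; all updates are serialized by
 * netxen_api_lock().  Dropping the last reference marks the
 * device cold.
 */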
2198 static int
2199 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
2200 {
2201 int count;
2202 if (netxen_api_lock(adapter))
2203 return -EIO;
2204
2205 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2206
2207 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
2208
2209 netxen_api_unlock(adapter);
2210 return count;
2211 }
2212
2213 static int
2214 nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2215 {
2216 int count;
2217 if (netxen_api_lock(adapter))
2218 return -EIO;
2219
2220 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2221 WARN_ON(count == 0);
2222
2223 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
2224
2225 if (count == 0)
2226 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
2227
2228 netxen_api_unlock(adapter);
2229 return count;
2230 }
2231
2232 static int
2233 nx_dev_request_aer(struct netxen_adapter *adapter)
2234 {
2235 u32 state;
2236 int ret = -EINVAL;
2237
2238 if (netxen_api_lock(adapter))
2239 return ret;
2240
2241 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2242
2243 if (state == NX_DEV_NEED_AER)
2244 ret = 0;
2245 else if (state == NX_DEV_READY) {
2246 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
2247 ret = 0;
2248 }
2249
2250 netxen_api_unlock(adapter);
2251 return ret;
2252 }
2253
2254 static int
2255 nx_dev_request_reset(struct netxen_adapter *adapter)
2256 {
2257 u32 state;
2258 int ret = -EINVAL;
2259
2260 if (netxen_api_lock(adapter))
2261 return ret;
2262
2263 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2264
2265 if (state == NX_DEV_NEED_RESET)
2266 ret = 0;
2267 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
2268 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
2269 ret = 0;
2270 }
2271
2272 netxen_api_unlock(adapter);
2273
2274 return ret;
2275 }
2276
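/* Only the first function to come up (reference count still
 * zero) starts the firmware and moves the device to the
 * initializing state; every caller takes a reference.
 */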
2277 static int
2278 netxen_can_start_firmware(struct netxen_adapter *adapter)
2279 {
2280 int count;
2281 int can_start = 0;
2282
2283 if (netxen_api_lock(adapter))
2284 return 0;
2285
2286 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2287
2288 if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
2289 count = 0;
2290
2291 if (count == 0) {
2292 can_start = 1;
2293 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
2294 }
2295
2296 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
2297
2298 netxen_api_unlock(adapter);
2299
2300 return can_start;
2301 }
2302
2303 static void
2304 netxen_schedule_work(struct netxen_adapter *adapter,
2305 work_func_t func, int delay)
2306 {
2307 INIT_DELAYED_WORK(&adapter->fw_work, func);
2308 schedule_delayed_work(&adapter->fw_work, delay);
2309 }
2310
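/* Grab exclusive ownership of the reset state before
 * cancelling the delayed firmware work.
 */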
2311 static void
2312 netxen_cancel_fw_work(struct netxen_adapter *adapter)
2313 {
2314 while (test_and_set_bit(__NX_RESETTING, &adapter->state))
2315 msleep(10);
2316
2317 cancel_delayed_work_sync(&adapter->fw_work);
2318 }
2319
2320 static void
2321 netxen_attach_work(struct work_struct *work)
2322 {
2323 struct netxen_adapter *adapter = container_of(work,
2324 struct netxen_adapter, fw_work.work);
2325 struct net_device *netdev = adapter->netdev;
2326 int err = 0;
2327
2328 if (netif_running(netdev)) {
2329 err = netxen_nic_attach(adapter);
2330 if (err)
2331 goto done;
2332
2333 err = netxen_nic_up(adapter, netdev);
2334 if (err) {
2335 netxen_nic_detach(adapter);
2336 goto done;
2337 }
2338
2339 netxen_config_indev_addr(netdev, NETDEV_UP);
2340 }
2341
2342 netif_device_attach(netdev);
2343
2344 done:
2345 adapter->fw_fail_cnt = 0;
2346 clear_bit(__NX_RESETTING, &adapter->state);
2347 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
2348 }
2349
2350 static void
2351 netxen_fwinit_work(struct work_struct *work)
2352 {
2353 struct netxen_adapter *adapter = container_of(work,
2354 struct netxen_adapter, fw_work.work);
2355 int dev_state;
2356
2357 dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
2358
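/* Firmware recovery state machine: cold/ready devices get the
 * firmware started, devices still resetting or initializing
 * are polled again, and anything else gives up and just takes
 * a device reference.
 */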
2359 switch (dev_state) {
2360 case NX_DEV_COLD:
2361 case NX_DEV_READY:
2362 if (!netxen_start_firmware(adapter)) {
2363 netxen_schedule_work(adapter, netxen_attach_work, 0);
2364 return;
2365 }
2366 break;
2367
2368 case NX_DEV_NEED_RESET:
2369 case NX_DEV_INITALIZING:
2370 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
2371 netxen_schedule_work(adapter,
2372 netxen_fwinit_work, 2 * FW_POLL_DELAY);
2373 return;
2374 }
2375
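/* fall through: polled too long, give up on this reset */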
2376 case NX_DEV_FAILED:
2377 default:
2378 nx_incr_dev_ref_cnt(adapter);
2379 break;
2380 }
2381
2382 clear_bit(__NX_RESETTING, &adapter->state);
2383 }
2384
2385 static void
2386 netxen_detach_work(struct work_struct *work)
2387 {
2388 struct netxen_adapter *adapter = container_of(work,
2389 struct netxen_adapter, fw_work.work);
2390 struct net_device *netdev = adapter->netdev;
2391 int ref_cnt, delay;
2392 u32 status;
2393
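/* Quiesce the interface, detach from the firmware and drop our
 * device reference; hand off to netxen_fwinit_work unless the
 * failure is fatal (firmware error or thermal shutdown).
 */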
2394 netif_device_detach(netdev);
2395
2396 netxen_nic_down(adapter, netdev);
2397
2398 rtnl_lock();
2399 netxen_nic_detach(adapter);
2400 rtnl_unlock();
2401
2402 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2403
2404 if (status & NX_RCODE_FATAL_ERROR)
2405 goto err_ret;
2406
2407 if (adapter->temp == NX_TEMP_PANIC)
2408 goto err_ret;
2409
2410 ref_cnt = nx_decr_dev_ref_cnt(adapter);
2411
2412 if (ref_cnt == -EIO)
2413 goto err_ret;
2414
2415 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
2416
2417 adapter->fw_wait_cnt = 0;
2418 netxen_schedule_work(adapter, netxen_fwinit_work, delay);
2419
2420 return;
2421
2422 err_ret:
2423 clear_bit(__NX_RESETTING, &adapter->state);
2424 }
2425
2426 static int
2427 netxen_check_health(struct netxen_adapter *adapter)
2428 {
2429 u32 state, heartbit;
2430 struct net_device *netdev = adapter->netdev;
2431
2432 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2433 if (state == NX_DEV_NEED_AER)
2434 return 0;
2435
2436 if (netxen_nic_check_temp(adapter))
2437 goto detach;
2438
2439 if (adapter->need_fw_reset) {
2440 if (nx_dev_request_reset(adapter))
2441 return 0;
2442 goto detach;
2443 }
2444
2445 /* NX_DEV_NEED_RESET can be set in two cases:
2446 * 1. Tx timeout  2. Fw hang
2447 * A request to destroy the context is needed only for the
2448 * Tx-timeout case, not for a Fw hang.
2449 */
2450 if (state == NX_DEV_NEED_RESET) {
2451 adapter->need_fw_reset = 1;
2452 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2453 goto detach;
2454 }
2455
2456 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2457 return 0;
2458
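/* The firmware bumps NETXEN_PEG_ALIVE_COUNTER while healthy;
 * if it stops changing for FW_FAIL_THRESH polls, treat the
 * firmware as hung and request a reset.
 */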
2459 heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
2460 if (heartbit != adapter->heartbit) {
2461 adapter->heartbit = heartbit;
2462 adapter->fw_fail_cnt = 0;
2463 if (adapter->need_fw_reset)
2464 goto detach;
2465 return 0;
2466 }
2467
2468 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2469 return 0;
2470
2471 if (nx_dev_request_reset(adapter))
2472 return 0;
2473
2474 clear_bit(__NX_FW_ATTACHED, &adapter->state);
2475
2476 dev_info(&netdev->dev, "firmware hang detected\n");
2477
2478 detach:
2479 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2480 !test_and_set_bit(__NX_RESETTING, &adapter->state))
2481 netxen_schedule_work(adapter, netxen_detach_work, 0);
2482 return 1;
2483 }
2484
2485 static void
2486 netxen_fw_poll_work(struct work_struct *work)
2487 {
2488 struct netxen_adapter *adapter = container_of(work,
2489 struct netxen_adapter, fw_work.work);
2490
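/* Periodic housekeeping: track link state when firmware link
 * events are not available, then run the health check.  The
 * work rearms itself unless the health check decided the
 * device needs to be taken down.
 */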
2491 if (test_bit(__NX_RESETTING, &adapter->state))
2492 goto reschedule;
2493
2494 if (test_bit(__NX_DEV_UP, &adapter->state)) {
2495 if (!adapter->has_link_events) {
2496
2497 netxen_nic_handle_phy_intr(adapter);
2498
2499 if (adapter->link_changed)
2500 netxen_nic_set_link_parameters(adapter);
2501 }
2502 }
2503
2504 if (netxen_check_health(adapter))
2505 return;
2506
2507 reschedule:
2508 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
2509 }
2510
2511 static ssize_t
2512 netxen_store_bridged_mode(struct device *dev,
2513 struct device_attribute *attr, const char *buf, size_t len)
2514 {
2515 struct net_device *net = to_net_dev(dev);
2516 struct netxen_adapter *adapter = netdev_priv(net);
2517 unsigned long new;
2518 int ret = -EINVAL;
2519
2520 if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
2521 goto err_out;
2522
2523 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
2524 goto err_out;
2525
2526 if (strict_strtoul(buf, 2, &new))
2527 goto err_out;
2528
2529 if (!netxen_config_bridged_mode(adapter, !!new))
2530 ret = len;
2531
2532 err_out:
2533 return ret;
2534 }
2535
2536 static ssize_t
2537 netxen_show_bridged_mode(struct device *dev,
2538 struct device_attribute *attr, char *buf)
2539 {
2540 struct net_device *net = to_net_dev(dev);
2541 struct netxen_adapter *adapter;
2542 int bridged_mode = 0;
2543
2544 adapter = netdev_priv(net);
2545
2546 if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
2547 bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
2548
2549 return sprintf(buf, "%d\n", bridged_mode);
2550 }
2551
2552 static struct device_attribute dev_attr_bridged_mode = {
2553 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2554 .show = netxen_show_bridged_mode,
2555 .store = netxen_store_bridged_mode,
2556 };
2557
2558 static ssize_t
2559 netxen_store_diag_mode(struct device *dev,
2560 struct device_attribute *attr, const char *buf, size_t len)
2561 {
2562 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2563 unsigned long new;
2564
2565 if (strict_strtoul(buf, 2, &new))
2566 return -EINVAL;
2567
2568 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2569 adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
2570
2571 return len;
2572 }
2573
2574 static ssize_t
2575 netxen_show_diag_mode(struct device *dev,
2576 struct device_attribute *attr, char *buf)
2577 {
2578 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2579
2580 return sprintf(buf, "%d\n",
2581 !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
2582 }
2583
2584 static struct device_attribute dev_attr_diag_mode = {
2585 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2586 .show = netxen_show_diag_mode,
2587 .store = netxen_store_diag_mode,
2588 };
2589
2590 static int
2591 netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
2592 loff_t offset, size_t size)
2593 {
2594 size_t crb_size = 4;
2595
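/* CRB registers are accessed 4 bytes at a time.  The only
 * region allowed below PCI CRB space is the CAMQM window on
 * P3 parts, which uses 8-byte accesses.  Offsets must be
 * naturally aligned to the access size.
 */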
2596 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2597 return -EIO;
2598
2599 if (offset < NETXEN_PCI_CRBSPACE) {
2600 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2601 return -EINVAL;
2602
2603 if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2604 NETXEN_PCI_CAMQM_2M_END))
2605 crb_size = 8;
2606 else
2607 return -EINVAL;
2608 }
2609
2610 if ((size != crb_size) || (offset & (crb_size-1)))
2611 return -EINVAL;
2612
2613 return 0;
2614 }
2615
2616 static ssize_t
2617 netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2618 struct bin_attribute *attr,
2619 char *buf, loff_t offset, size_t size)
2620 {
2621 struct device *dev = container_of(kobj, struct device, kobj);
2622 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2623 u32 data;
2624 u64 qmdata;
2625 int ret;
2626
2627 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2628 if (ret != 0)
2629 return ret;
2630
2631 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
2632 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2633 NETXEN_PCI_CAMQM_2M_END)) {
2634 netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
2635 memcpy(buf, &qmdata, size);
2636 } else {
2637 data = NXRD32(adapter, offset);
2638 memcpy(buf, &data, size);
2639 }
2640
2641 return size;
2642 }
2643
2644 static ssize_t
2645 netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2646 struct bin_attribute *attr,
2647 char *buf, loff_t offset, size_t size)
2648 {
2649 struct device *dev = container_of(kobj, struct device, kobj);
2650 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2651 u32 data;
2652 u64 qmdata;
2653 int ret;
2654
2655 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2656 if (ret != 0)
2657 return ret;
2658
2659 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
2660 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2661 NETXEN_PCI_CAMQM_2M_END)) {
2662 memcpy(&qmdata, buf, size);
2663 netxen_pci_camqm_write_2M(adapter, offset, qmdata);
2664 } else {
2665 memcpy(&data, buf, size);
2666 NXWR32(adapter, offset, data);
2667 }
2668
2669 return size;
2670 }
2671
2672 static int
2673 netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
2674 loff_t offset, size_t size)
2675 {
2676 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2677 return -EIO;
2678
2679 if ((size != 8) || (offset & 0x7))
2680 return -EIO;
2681
2682 return 0;
2683 }
2684
2685 static ssize_t
2686 netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2687 struct bin_attribute *attr,
2688 char *buf, loff_t offset, size_t size)
2689 {
2690 struct device *dev = container_of(kobj, struct device, kobj);
2691 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2692 u64 data;
2693 int ret;
2694
2695 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2696 if (ret != 0)
2697 return ret;
2698
2699 if (adapter->pci_mem_read(adapter, offset, &data))
2700 return -EIO;
2701
2702 memcpy(buf, &data, size);
2703
2704 return size;
2705 }
2706
2707 static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2708 struct bin_attribute *attr, char *buf,
2709 loff_t offset, size_t size)
2710 {
2711 struct device *dev = container_of(kobj, struct device, kobj);
2712 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2713 u64 data;
2714 int ret;
2715
2716 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2717 if (ret != 0)
2718 return ret;
2719
2720 memcpy(&data, buf, size);
2721
2722 if (adapter->pci_mem_write(adapter, offset, data))
2723 return -EIO;
2724
2725 return size;
2726 }
2727
2728
2729 static struct bin_attribute bin_attr_crb = {
2730 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2731 .size = 0,
2732 .read = netxen_sysfs_read_crb,
2733 .write = netxen_sysfs_write_crb,
2734 };
2735
2736 static struct bin_attribute bin_attr_mem = {
2737 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2738 .size = 0,
2739 .read = netxen_sysfs_read_mem,
2740 .write = netxen_sysfs_write_mem,
2741 };
2742
2743
2744 static void
2745 netxen_create_sysfs_entries(struct netxen_adapter *adapter)
2746 {
2747 struct net_device *netdev = adapter->netdev;
2748 struct device *dev = &netdev->dev;
2749
2750 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
2751 /* bridged_mode control */
2752 if (device_create_file(dev, &dev_attr_bridged_mode)) {
2753 dev_warn(&netdev->dev,
2754 "failed to create bridged_mode sysfs entry\n");
2755 }
2756 }
2757 }
2758
2759 static void
2760 netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
2761 {
2762 struct net_device *netdev = adapter->netdev;
2763 struct device *dev = &netdev->dev;
2764
2765 if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
2766 device_remove_file(dev, &dev_attr_bridged_mode);
2767 }
2768
2769 static void
2770 netxen_create_diag_entries(struct netxen_adapter *adapter)
2771 {
2772 struct pci_dev *pdev = adapter->pdev;
2773 struct device *dev;
2774
2775 dev = &pdev->dev;
2776 if (device_create_file(dev, &dev_attr_diag_mode))
2777 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2778 if (device_create_bin_file(dev, &bin_attr_crb))
2779 dev_info(dev, "failed to create crb sysfs entry\n");
2780 if (device_create_bin_file(dev, &bin_attr_mem))
2781 dev_info(dev, "failed to create mem sysfs entry\n");
2782 }
2783
2784
2785 static void
2786 netxen_remove_diag_entries(struct netxen_adapter *adapter)
2787 {
2788 struct pci_dev *pdev = adapter->pdev;
2789 struct device *dev = &pdev->dev;
2790
2791 device_remove_file(dev, &dev_attr_diag_mode);
2792 device_remove_bin_file(dev, &bin_attr_crb);
2793 device_remove_bin_file(dev, &bin_attr_mem);
2794 }
2795
2796 #ifdef CONFIG_INET
2797
2798 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
2799
2800 static int
2801 netxen_destip_supported(struct netxen_adapter *adapter)
2802 {
2803 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2804 return 0;
2805
2806 if (adapter->ahw.cut_through)
2807 return 0;
2808
2809 return 1;
2810 }
2811
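/* Walk every IPv4 address on the interface and let the
 * firmware know about it (or about its removal) via
 * netxen_config_ipaddr().
 */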
2812 static void
2813 netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2814 {
2815 struct in_device *indev;
2816 struct netxen_adapter *adapter = netdev_priv(dev);
2817
2818 if (!netxen_destip_supported(adapter))
2819 return;
2820
2821 indev = in_dev_get(dev);
2822 if (!indev)
2823 return;
2824
2825 for_ifa(indev) {
2826 switch (event) {
2827 case NETDEV_UP:
2828 netxen_config_ipaddr(adapter,
2829 ifa->ifa_address, NX_IP_UP);
2830 break;
2831 case NETDEV_DOWN:
2832 netxen_config_ipaddr(adapter,
2833 ifa->ifa_address, NX_IP_DOWN);
2834 break;
2835 default:
2836 break;
2837 }
2838 } endfor_ifa(indev);
2839
2840 in_dev_put(indev);
2841 }
2842
2843 static int netxen_netdev_event(struct notifier_block *this,
2844 unsigned long event, void *ptr)
2845 {
2846 struct netxen_adapter *adapter;
2847 struct net_device *dev = (struct net_device *)ptr;
2848
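/* The event may arrive on a VLAN device; walk down to the real
 * device before checking whether it belongs to this driver.
 */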
2849 recheck:
2850 if (dev == NULL)
2851 goto done;
2852
2853 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2854 dev = vlan_dev_real_dev(dev);
2855 goto recheck;
2856 }
2857
2858 if (!is_netxen_netdev(dev))
2859 goto done;
2860
2861 adapter = netdev_priv(dev);
2862
2863 if (!adapter)
2864 goto done;
2865
2866 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
2867 goto done;
2868
2869 netxen_config_indev_addr(dev, event);
2870 done:
2871 return NOTIFY_DONE;
2872 }
2873
2874 static int
2875 netxen_inetaddr_event(struct notifier_block *this,
2876 unsigned long event, void *ptr)
2877 {
2878 struct netxen_adapter *adapter;
2879 struct net_device *dev;
2880
2881 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2882
2883 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2884
2885 recheck:
2886 if (dev == NULL || !netif_running(dev))
2887 goto done;
2888
2889 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2890 dev = vlan_dev_real_dev(dev);
2891 goto recheck;
2892 }
2893
2894 if (!is_netxen_netdev(dev))
2895 goto done;
2896
2897 adapter = netdev_priv(dev);
2898
2899 if (!adapter || !netxen_destip_supported(adapter))
2900 goto done;
2901
2902 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
2903 goto done;
2904
2905 switch (event) {
2906 case NETDEV_UP:
2907 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
2908 break;
2909 case NETDEV_DOWN:
2910 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
2911 break;
2912 default:
2913 break;
2914 }
2915
2916 done:
2917 return NOTIFY_DONE;
2918 }
2919
2920 static struct notifier_block netxen_netdev_cb = {
2921 .notifier_call = netxen_netdev_event,
2922 };
2923
2924 static struct notifier_block netxen_inetaddr_cb = {
2925 .notifier_call = netxen_inetaddr_event,
2926 };
2927 #else
2928 static void
2929 netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2930 { }
2931 #endif
2932
2933 static struct pci_error_handlers netxen_err_handler = {
2934 .error_detected = netxen_io_error_detected,
2935 .slot_reset = netxen_io_slot_reset,
2936 .resume = netxen_io_resume,
2937 };
2938
2939 static struct pci_driver netxen_driver = {
2940 .name = netxen_nic_driver_name,
2941 .id_table = netxen_pci_tbl,
2942 .probe = netxen_nic_probe,
2943 .remove = __devexit_p(netxen_nic_remove),
2944 #ifdef CONFIG_PM
2945 .suspend = netxen_nic_suspend,
2946 .resume = netxen_nic_resume,
2947 #endif
2948 .shutdown = netxen_nic_shutdown,
2949 .err_handler = &netxen_err_handler
2950 };
2951
2952 static int __init netxen_init_module(void)
2953 {
2954 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2955
2956 #ifdef CONFIG_INET
2957 register_netdevice_notifier(&netxen_netdev_cb);
2958 register_inetaddr_notifier(&netxen_inetaddr_cb);
2959 #endif
2960 return pci_register_driver(&netxen_driver);
2961 }
2962
2963 module_init(netxen_init_module);
2964
2965 static void __exit netxen_exit_module(void)
2966 {
2967 pci_unregister_driver(&netxen_driver);
2968
2969 #ifdef CONFIG_INET
2970 unregister_inetaddr_notifier(&netxen_inetaddr_cb);
2971 unregister_netdevice_notifier(&netxen_netdev_cb);
2972 #endif
2973 }
2974
2975 module_exit(netxen_exit_module);