drivers/net/ethernet/qlogic/qlge/qlge_main.c
1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
58 NETIF_MSG_IFDOWN |
59 NETIF_MSG_IFUP |
60 NETIF_MSG_RX_ERR |
61 NETIF_MSG_TX_ERR |
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
94 {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore provides exclusive access to resources
103 * shared between the NIC driver, MPI firmware, FCoE firmware
104 * and the FC driver. See the illustrative usage sketch after ql_sem_unlock() below.
105 */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108 u32 sem_bits = 0;
109
110 switch (sem_mask) {
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 break;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 break;
117 case SEM_ICB_MASK:
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 break;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 break;
123 case SEM_FLASH_MASK:
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 break;
126 case SEM_PROBE_MASK:
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 break;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 break;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 break;
135 default:
136 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
137 return -EINVAL;
138 }
139
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146 unsigned int wait_count = 30;
147 do {
148 if (!ql_sem_trylock(qdev, sem_mask))
149 return 0;
150 udelay(100);
151 } while (--wait_count);
152 return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
159 }
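
/* Illustrative sketch (not part of the original driver): the typical
 * acquire/use/release pattern for the hardware semaphore above. A caller
 * grabs one of the SEM_* resources with ql_sem_spinlock(), touches the
 * shared hardware, and must always release it with ql_sem_unlock(), even
 * on error paths. The flash access named below is only a placeholder;
 * the real flash readers appear later in this file.
 */
static int ql_example_sem_usage(struct ql_adapter *qdev)
{
	int status;

	/* Spin (30 tries, 100 us apart) until the FLASH semaphore is ours. */
	status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it. */

	/* ... access the flash registers here ... */

	/* Always drop the semaphore so the MPI/FCoE firmware can take it. */
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return 0;
}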
160
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialization
163 * process, but is also used in kernel thread APIs such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165 */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168 u32 temp;
169 int count = UDELAY_COUNT;
170
171 while (count) {
172 temp = ql_read32(qdev, reg);
173
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
178 reg, temp);
179 return -EIO;
180 } else if (temp & bit)
181 return 0;
182 udelay(UDELAY_DELAY);
183 count--;
184 }
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
187 return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
192 */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195 int count = UDELAY_COUNT;
196 u32 temp;
197
198 while (count) {
199 temp = ql_read32(qdev, CFG);
200 if (temp & CFG_LE)
201 return -EIO;
202 if (!(temp & bit))
203 return 0;
204 udelay(UDELAY_DELAY);
205 count--;
206 }
207 return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
213 */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 u16 q_id)
216 {
217 u64 map;
218 int status = 0;
219 int direction;
220 u32 mask;
221 u32 value;
222
223 direction =
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 PCI_DMA_FROMDEVICE;
226
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 return -ENOMEM;
231 }
232
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 if (status)
235 return status;
236
237 status = ql_wait_cfg(qdev, bit);
238 if (status) {
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
241 goto exit;
242 }
243
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
250
251 /*
252 * Wait for the bit to clear after signaling hw.
253 */
254 status = ql_wait_cfg(qdev, bit);
255 exit:
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
258 return status;
259 }
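
/* Illustrative sketch (not part of the original driver): handing an
 * initialized control block to the chip with ql_write_cfg(). The caller
 * builds the control block in a DMA-able buffer and passes its address,
 * size, one of the CFG_* load bits and the target queue id. The
 * 'cq_block' buffer and 'cq_id' below are placeholders for whatever ICB
 * the caller has prepared.
 */
static int ql_example_load_cq_icb(struct ql_adapter *qdev, void *cq_block,
				  int size, u16 cq_id)
{
	/* CFG_LCQ asks the chip to load a completion queue control block.
	 * ql_write_cfg() maps the buffer, triggers the download and waits
	 * for the CFG busy bit to clear before returning.
	 */
	return ql_write_cfg(qdev, cq_block, size, CFG_LCQ, cq_id);
}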
260
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 u32 *value)
264 {
265 u32 offset = 0;
266 int status;
267
268 switch (type) {
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
271 {
272 status =
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 if (status)
276 goto exit;
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 status =
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 if (status)
284 goto exit;
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 status =
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 if (status)
290 goto exit;
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 status =
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 if (status)
298 goto exit;
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 status =
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 if (status)
305 goto exit;
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 status =
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 MAC_ADDR_MR, 0);
312 if (status)
313 goto exit;
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 }
316 break;
317 }
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
320 default:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
323 status = -EPERM;
324 }
325 exit:
326 return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
331 */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 u16 index)
334 {
335 u32 offset = 0;
336 int status = 0;
337
338 switch (type) {
339 case MAC_ADDR_TYPE_MULTI_MAC:
340 {
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
344
345 status =
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 if (status)
349 goto exit;
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
352 type | MAC_ADDR_E);
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
354 status =
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 if (status)
358 goto exit;
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
361 type | MAC_ADDR_E);
362
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
364 status =
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 if (status)
368 goto exit;
369 break;
370 }
371 case MAC_ADDR_TYPE_CAM_MAC:
372 {
373 u32 cam_output;
374 u32 upper = (addr[0] << 8) | addr[1];
375 u32 lower =
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 (addr[5]);
378 status =
379 ql_wait_reg_rdy(qdev,
380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, lower);
387 status =
388 ql_wait_reg_rdy(qdev,
389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 ql_write32(qdev, MAC_ADDR_DATA, upper);
396 status =
397 ql_wait_reg_rdy(qdev,
398 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 if (status)
400 goto exit;
401 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402 (index << MAC_ADDR_IDX_SHIFT) | /* index */
403 type); /* type */
404 /* This field should also include the queue id
405 and possibly the function id. Right now we hardcode
406 the route field to NIC core.
407 */
408 cam_output = (CAM_OUT_ROUTE_NIC |
409 (qdev->
410 func << CAM_OUT_FUNC_SHIFT) |
411 (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416 break;
417 }
418 case MAC_ADDR_TYPE_VLAN:
419 {
420 u32 enable_bit = *((u32 *) &addr[0]);
421 /* For VLAN, the addr actually holds a bit that
422 * either enables or disables the vlan id we are
423 * addressing. It's either MAC_ADDR_E on or off.
424 * That's bit-27 we're talking about.
425 */
426 status =
427 ql_wait_reg_rdy(qdev,
428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429 if (status)
430 goto exit;
431 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432 (index << MAC_ADDR_IDX_SHIFT) | /* index */
433 type | /* type */
434 enable_bit); /* enable/disable */
435 break;
436 }
437 case MAC_ADDR_TYPE_MULTI_FLTR:
438 default:
439 netif_crit(qdev, ifup, qdev->ndev,
440 "Address type %d not yet supported.\n", type);
441 status = -EPERM;
442 }
443 exit:
444 return status;
445 }
446
447 /* Set or clear MAC address in hardware. We sometimes
448 * have to clear it to prevent wrong frame routing
449 * especially in a bonding environment.
450 */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453 int status;
454 char zero_mac_addr[ETH_ALEN];
455 char *addr;
456
457 if (set) {
458 addr = &qdev->current_mac_addr[0];
459 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 "Set Mac addr %pM\n", addr);
461 } else {
462 memset(zero_mac_addr, 0, ETH_ALEN);
463 addr = &zero_mac_addr[0];
464 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 "Clearing MAC address\n");
466 }
467 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468 if (status)
469 return status;
470 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473 if (status)
474 netif_err(qdev, ifup, qdev->ndev,
475 "Failed to init mac address.\n");
476 return status;
477 }
478
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 netif_carrier_on(qdev->ndev);
483 ql_set_mac_addr(qdev, 1);
484 }
485
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498 int status = 0;
499
500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510 exit:
511 return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521 {
522 int status = -EINVAL; /* Return error if no mask match. */
523 u32 value = 0;
524
525 switch (mask) {
526 case RT_IDX_CAM_HIT:
527 {
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 break;
532 }
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
534 {
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 break;
539 }
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
541 {
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 break;
546 }
547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548 {
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
553 break;
554 }
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
561 break;
562 }
563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
564 {
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 break;
569 }
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
571 {
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 break;
576 }
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
578 {
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 break;
583 }
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
585 {
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589 break;
590 }
591 case 0: /* Clear the E-bit on an entry. */
592 {
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
596 break;
597 }
598 default:
599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
601 status = -EPERM;
602 goto exit;
603 }
604
605 if (value) {
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607 if (status)
608 goto exit;
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612 }
613 exit:
614 return status;
615 }
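
/* Illustrative sketch (not part of the original driver): enabling one of
 * the 16 routing entries described above. This example routes broadcast
 * frames to the default (slow-path) queue; disabling the entry is the
 * same call with enable == 0. The RT_IDX semaphore is taken here on the
 * assumption that, as with the other indexed register files in this
 * driver, the routing registers are shared between functions.
 */
static int ql_example_route_broadcast(struct ql_adapter *qdev, int enable)
{
	int status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST,
				    enable);
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}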
616
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628 * Otherwise, we may have multiple outstanding workers and don't want to
629 * enable until the last one finishes. In this case, the irq_cnt gets
630 * incremented every time we queue a worker and decremented every time
631 * a worker finishes. Once it hits zero we enable the interrupt.
632 */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635 u32 var = 0;
636 unsigned long hw_flags = 0;
637 struct intr_context *ctx = qdev->intr_context + intr;
638
639 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 /* Always enable if we're MSIX multi interrupts and
641 * it's not the default (zeroeth) interrupt.
642 */
643 ql_write32(qdev, INTR_EN,
644 ctx->intr_en_mask);
645 var = ql_read32(qdev, STS);
646 return var;
647 }
648
649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 ql_write32(qdev, INTR_EN,
652 ctx->intr_en_mask);
653 var = ql_read32(qdev, STS);
654 }
655 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656 return var;
657 }
658
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661 u32 var = 0;
662 struct intr_context *ctx;
663
664 /* HW disables for us if we're MSIX multi interrupts and
665 * it's not the default (zeroeth) interrupt.
666 */
667 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668 return 0;
669
670 ctx = qdev->intr_context + intr;
671 spin_lock(&qdev->hw_lock);
672 if (!atomic_read(&ctx->irq_cnt)) {
673 ql_write32(qdev, INTR_EN,
674 ctx->intr_dis_mask);
675 var = ql_read32(qdev, STS);
676 }
677 atomic_inc(&ctx->irq_cnt);
678 spin_unlock(&qdev->hw_lock);
679 return var;
680 }
681
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684 int i;
685 for (i = 0; i < qdev->intr_count; i++) {
686 /* The enable call does an atomic_dec_and_test
687 * and enables only if the result is zero.
688 * So we precharge it here.
689 */
690 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691 i == 0))
692 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 ql_enable_completion_interrupt(qdev, i);
694 }
695
696 }
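
/* Illustrative sketch (not part of the original driver): the pairing the
 * irq_cnt comment above describes. For legacy/MSI interrupts (or the
 * default MSI-X vector), each worker brackets its work with a
 * disable/enable pair: the disable bumps irq_cnt and masks the vector if
 * it was the first user, the enable drops irq_cnt, and the hardware is
 * only re-armed when the count reaches zero again, i.e. when the last
 * outstanding worker finishes.
 */
static void ql_example_intr_refcount(struct ql_adapter *qdev, u32 intr)
{
	/* Increments irq_cnt (and masks the vector if it was unmasked). */
	ql_disable_completion_interrupt(qdev, intr);

	/* ... service the completion queue here ... */

	/* Decrements irq_cnt; re-enables only if this was the last user. */
	ql_enable_completion_interrupt(qdev, intr);
}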
697
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700 int status, i;
701 u16 csum = 0;
702 __le16 *flash = (__le16 *)&qdev->flash;
703
704 status = strncmp((char *)&qdev->flash, str, 4);
705 if (status) {
706 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707 return status;
708 }
709
710 for (i = 0; i < size; i++)
711 csum += le16_to_cpu(*flash++);
712
713 if (csum)
714 netif_err(qdev, ifup, qdev->ndev,
715 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716
717 return csum;
718 }
719
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722 int status = 0;
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726 if (status)
727 goto exit;
728 /* set up for reg read */
729 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733 if (status)
734 goto exit;
735 /* This data is stored on flash as an array of
736 * __le32. Since ql_read32() returns cpu endian
737 * we need to swap it back.
738 */
739 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741 return status;
742 }
743
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746 u32 i, size;
747 int status;
748 __le32 *p = (__le32 *)&qdev->flash;
749 u32 offset;
750 u8 mac_addr[6];
751
752 /* Get flash offset for function and adjust
753 * for dword access.
754 */
755 if (!qdev->port)
756 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757 else
758 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759
760 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761 return -ETIMEDOUT;
762
763 size = sizeof(struct flash_params_8000) / sizeof(u32);
764 for (i = 0; i < size; i++, p++) {
765 status = ql_read_flash_word(qdev, i+offset, p);
766 if (status) {
767 netif_err(qdev, ifup, qdev->ndev,
768 "Error reading flash.\n");
769 goto exit;
770 }
771 }
772
773 status = ql_validate_flash(qdev,
774 sizeof(struct flash_params_8000) / sizeof(u16),
775 "8000");
776 if (status) {
777 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778 status = -EINVAL;
779 goto exit;
780 }
781
782 /* Extract either manufacturer or BOFM modified
783 * MAC address.
784 */
785 if (qdev->flash.flash_params_8000.data_type1 == 2)
786 memcpy(mac_addr,
787 qdev->flash.flash_params_8000.mac_addr1,
788 qdev->ndev->addr_len);
789 else
790 memcpy(mac_addr,
791 qdev->flash.flash_params_8000.mac_addr,
792 qdev->ndev->addr_len);
793
794 if (!is_valid_ether_addr(mac_addr)) {
795 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796 status = -EINVAL;
797 goto exit;
798 }
799
800 memcpy(qdev->ndev->dev_addr,
801 mac_addr,
802 qdev->ndev->addr_len);
803
804 exit:
805 ql_sem_unlock(qdev, SEM_FLASH_MASK);
806 return status;
807 }
808
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811 int i;
812 int status;
813 __le32 *p = (__le32 *)&qdev->flash;
814 u32 offset = 0;
815 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816
817 /* Second function's parameters follow the first
818 * function's.
819 */
820 if (qdev->port)
821 offset = size;
822
823 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824 return -ETIMEDOUT;
825
826 for (i = 0; i < size; i++, p++) {
827 status = ql_read_flash_word(qdev, i+offset, p);
828 if (status) {
829 netif_err(qdev, ifup, qdev->ndev,
830 "Error reading flash.\n");
831 goto exit;
832 }
833
834 }
835
836 status = ql_validate_flash(qdev,
837 sizeof(struct flash_params_8012) / sizeof(u16),
838 "8012");
839 if (status) {
840 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841 status = -EINVAL;
842 goto exit;
843 }
844
845 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846 status = -EINVAL;
847 goto exit;
848 }
849
850 memcpy(qdev->ndev->dev_addr,
851 qdev->flash.flash_params_8012.mac_addr,
852 qdev->ndev->addr_len);
853
854 exit:
855 ql_sem_unlock(qdev, SEM_FLASH_MASK);
856 return status;
857 }
858
859 /* xgmac registers are located behind the xgmac_addr and xgmac_data
860 * register pair. Each read/write requires us to wait for the ready
861 * bit before reading/writing the data.
862 */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865 int status;
866 /* wait for reg to come ready */
867 status = ql_wait_reg_rdy(qdev,
868 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869 if (status)
870 return status;
871 /* write the data to the data reg */
872 ql_write32(qdev, XGMAC_DATA, data);
873 /* trigger the write */
874 ql_write32(qdev, XGMAC_ADDR, reg);
875 return status;
876 }
877
878 /* xgmac registers are located behind the xgmac_addr and xgmac_data
879 * register pair. Each read/write requires us to wait for the ready
880 * bit before reading/writing the data.
881 */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884 int status = 0;
885 /* wait for reg to come ready */
886 status = ql_wait_reg_rdy(qdev,
887 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888 if (status)
889 goto exit;
890 /* set up for reg read */
891 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 /* wait for reg to come ready */
893 status = ql_wait_reg_rdy(qdev,
894 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895 if (status)
896 goto exit;
897 /* get the data */
898 *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900 return status;
901 }
902
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906 int status = 0;
907 u32 hi = 0;
908 u32 lo = 0;
909
910 status = ql_read_xgmac_reg(qdev, reg, &lo);
911 if (status)
912 goto exit;
913
914 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915 if (status)
916 goto exit;
917
918 *data = (u64) lo | ((u64) hi << 32);
919
920 exit:
921 return status;
922 }
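
/* Illustrative sketch (not part of the original driver): pulling one
 * 64-bit XGMAC statistics counter through the indirect address/data
 * register pair. The offset used below is a made-up placeholder; the
 * real statistic offsets come from qlge.h / the chip documentation.
 */
static int ql_example_read_xgmac_stat(struct ql_adapter *qdev, u64 *val)
{
	u32 example_stat_reg = 0x200;	/* hypothetical register offset */

	/* Reads the low dword at the offset and the high dword at
	 * offset + 4, then returns lo | ((u64)hi << 32) in *val.
	 */
	return ql_read_xgmac_reg64(qdev, example_stat_reg, val);
}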
923
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926 int status;
927 /*
928 * Get MPI firmware version for driver banner
929 * and ethtool info.
930 */
931 status = ql_mb_about_fw(qdev);
932 if (status)
933 goto exit;
934 status = ql_mb_get_fw_state(qdev);
935 if (status)
936 goto exit;
937 /* Wake up a worker to get/set the TX/RX frame sizes. */
938 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940 return status;
941 }
942
943 /* Take the MAC Core out of reset.
944 * Enable statistics counting.
945 * Take the transmitter/receiver out of reset.
946 * This functionality may be done in the MPI firmware at a
947 * later date.
948 */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951 int status = 0;
952 u32 data;
953
954 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 /* Another function has the semaphore, so
956 * wait for the port init bit to come ready.
957 */
958 netif_info(qdev, link, qdev->ndev,
959 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961 if (status) {
962 netif_crit(qdev, link, qdev->ndev,
963 "Port initialize timed out.\n");
964 }
965 return status;
966 }
967
968 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
969 /* Set the core reset. */
970 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971 if (status)
972 goto end;
973 data |= GLOBAL_CFG_RESET;
974 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975 if (status)
976 goto end;
977
978 /* Clear the core reset and turn on jumbo for receiver. */
979 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981 data |= GLOBAL_CFG_TX_STAT_EN;
982 data |= GLOBAL_CFG_RX_STAT_EN;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984 if (status)
985 goto end;
986
987 /* Enable transmitter, and clear its reset. */
988 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989 if (status)
990 goto end;
991 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992 data |= TX_CFG_EN; /* Enable the transmitter. */
993 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994 if (status)
995 goto end;
996
997 /* Enable receiver and clear its reset. */
998 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999 if (status)
1000 goto end;
1001 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002 data |= RX_CFG_EN; /* Enable the receiver. */
1003 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004 if (status)
1005 goto end;
1006
1007 /* Turn on jumbo. */
1008 status =
1009 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010 if (status)
1011 goto end;
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014 if (status)
1015 goto end;
1016
1017 /* Signal to the world that the port is enabled. */
1018 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021 return status;
1022 }
1023
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026 return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 rx_ring->lbq_curr_idx++;
1034 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 rx_ring->lbq_curr_idx = 0;
1036 rx_ring->lbq_free_cnt++;
1037 return lbq_desc;
1038 }
1039
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 struct rx_ring *rx_ring)
1042 {
1043 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044
1045 pci_dma_sync_single_for_cpu(qdev->pdev,
1046 dma_unmap_addr(lbq_desc, mapaddr),
1047 rx_ring->lbq_buf_size,
1048 PCI_DMA_FROMDEVICE);
1049
1050 /* If it's the last chunk of our master page then
1051 * we unmap it.
1052 */
1053 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 == ql_lbq_block_size(qdev))
1055 pci_unmap_page(qdev->pdev,
1056 lbq_desc->p.pg_chunk.map,
1057 ql_lbq_block_size(qdev),
1058 PCI_DMA_FROMDEVICE);
1059 return lbq_desc;
1060 }
1061
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 rx_ring->sbq_curr_idx++;
1067 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 rx_ring->sbq_curr_idx = 0;
1069 rx_ring->sbq_free_cnt++;
1070 return sbq_desc;
1071 }
1072
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076 rx_ring->cnsmr_idx++;
1077 rx_ring->curr_entry++;
1078 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 rx_ring->cnsmr_idx = 0;
1080 rx_ring->curr_entry = rx_ring->cq_base;
1081 }
1082 }
1083
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 struct bq_desc *lbq_desc)
1091 {
1092 if (!rx_ring->pg_chunk.page) {
1093 u64 map;
1094 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095 GFP_ATOMIC,
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1100 return -ENOMEM;
1101 }
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n");
1111 return -ENOMEM;
1112 }
1113 rx_ring->pg_chunk.map = map;
1114 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115 }
1116
1117 /* Copy the current master pg_chunk info
1118 * to the current descriptor.
1119 */
1120 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121
1122 /* Adjust the master page chunk for next
1123 * buffer get.
1124 */
1125 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 rx_ring->pg_chunk.page = NULL;
1128 lbq_desc->p.pg_chunk.last_flag = 1;
1129 } else {
1130 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 get_page(rx_ring->pg_chunk.page);
1132 lbq_desc->p.pg_chunk.last_flag = 0;
1133 }
1134 return 0;
1135 }
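
/* Illustrative arithmetic (not part of the original driver): how many
 * receive buffers one master page block yields. With 4 KB pages and
 * lbq_buf_order == 1 the block is 8 KB; carved into, say, 2 KB large
 * buffers that gives four chunks. get_page() is taken for every chunk
 * except the last, so the block is only freed after every chunk has
 * been consumed and its reference dropped.
 */
static inline unsigned int ql_example_chunks_per_block(struct ql_adapter *qdev,
						       struct rx_ring *rx_ring)
{
	return ql_lbq_block_size(qdev) / rx_ring->lbq_buf_size;
}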
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139 u32 clean_idx = rx_ring->lbq_clean_idx;
1140 u32 start_idx = clean_idx;
1141 struct bq_desc *lbq_desc;
1142 u64 map;
1143 int i;
1144
1145 while (rx_ring->lbq_free_cnt > 32) {
1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 "lbq: try cleaning clean_idx = %d.\n",
1149 clean_idx);
1150 lbq_desc = &rx_ring->lbq[clean_idx];
1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1153 netif_err(qdev, ifup, qdev->ndev,
1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155 i, clean_idx);
1156 return;
1157 }
1158
1159 map = lbq_desc->p.pg_chunk.map +
1160 lbq_desc->p.pg_chunk.offset;
1161 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 dma_unmap_len_set(lbq_desc, maplen,
1163 rx_ring->lbq_buf_size);
1164 *lbq_desc->addr = cpu_to_le64(map);
1165
1166 pci_dma_sync_single_for_device(qdev->pdev, map,
1167 rx_ring->lbq_buf_size,
1168 PCI_DMA_FROMDEVICE);
1169 clean_idx++;
1170 if (clean_idx == rx_ring->lbq_len)
1171 clean_idx = 0;
1172 }
1173
1174 rx_ring->lbq_clean_idx = clean_idx;
1175 rx_ring->lbq_prod_idx += 16;
1176 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 rx_ring->lbq_prod_idx = 0;
1178 rx_ring->lbq_free_cnt -= 16;
1179 }
1180
1181 if (start_idx != clean_idx) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: updating prod idx = %d.\n",
1184 rx_ring->lbq_prod_idx);
1185 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 rx_ring->lbq_prod_idx_db_reg);
1187 }
1188 }
1189
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193 u32 clean_idx = rx_ring->sbq_clean_idx;
1194 u32 start_idx = clean_idx;
1195 struct bq_desc *sbq_desc;
1196 u64 map;
1197 int i;
1198
1199 while (rx_ring->sbq_free_cnt > 16) {
1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 sbq_desc = &rx_ring->sbq[clean_idx];
1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 "sbq: try cleaning clean_idx = %d.\n",
1204 clean_idx);
1205 if (sbq_desc->p.skb == NULL) {
1206 netif_printk(qdev, rx_status, KERN_DEBUG,
1207 qdev->ndev,
1208 "sbq: getting new skb for index %d.\n",
1209 sbq_desc->index);
1210 sbq_desc->p.skb =
1211 netdev_alloc_skb(qdev->ndev,
1212 SMALL_BUFFER_SIZE);
1213 if (sbq_desc->p.skb == NULL) {
1214 rx_ring->sbq_clean_idx = clean_idx;
1215 return;
1216 }
1217 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1218 map = pci_map_single(qdev->pdev,
1219 sbq_desc->p.skb->data,
1220 rx_ring->sbq_buf_size,
1221 PCI_DMA_FROMDEVICE);
1222 if (pci_dma_mapping_error(qdev->pdev, map)) {
1223 netif_err(qdev, ifup, qdev->ndev,
1224 "PCI mapping failed.\n");
1225 rx_ring->sbq_clean_idx = clean_idx;
1226 dev_kfree_skb_any(sbq_desc->p.skb);
1227 sbq_desc->p.skb = NULL;
1228 return;
1229 }
1230 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1231 dma_unmap_len_set(sbq_desc, maplen,
1232 rx_ring->sbq_buf_size);
1233 *sbq_desc->addr = cpu_to_le64(map);
1234 }
1235
1236 clean_idx++;
1237 if (clean_idx == rx_ring->sbq_len)
1238 clean_idx = 0;
1239 }
1240 rx_ring->sbq_clean_idx = clean_idx;
1241 rx_ring->sbq_prod_idx += 16;
1242 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1243 rx_ring->sbq_prod_idx = 0;
1244 rx_ring->sbq_free_cnt -= 16;
1245 }
1246
1247 if (start_idx != clean_idx) {
1248 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1249 "sbq: updating prod idx = %d.\n",
1250 rx_ring->sbq_prod_idx);
1251 ql_write_db_reg(rx_ring->sbq_prod_idx,
1252 rx_ring->sbq_prod_idx_db_reg);
1253 }
1254 }
1255
1256 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1257 struct rx_ring *rx_ring)
1258 {
1259 ql_update_sbq(qdev, rx_ring);
1260 ql_update_lbq(qdev, rx_ring);
1261 }
1262
1263 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1264 * fails at some stage, or from the interrupt when a tx completes.
1265 */
1266 static void ql_unmap_send(struct ql_adapter *qdev,
1267 struct tx_ring_desc *tx_ring_desc, int mapped)
1268 {
1269 int i;
1270 for (i = 0; i < mapped; i++) {
1271 if (i == 0 || (i == 7 && mapped > 7)) {
1272 /*
1273 * Unmap the skb->data area, or the
1274 * external sglist (AKA the Outbound
1275 * Address List (OAL)).
1276 * If it's the zeroth element, then it's
1277 * the skb->data area. If it's the 7th
1278 * element and there are more than 6 frags,
1279 * then it's an OAL.
1280 */
1281 if (i == 7) {
1282 netif_printk(qdev, tx_done, KERN_DEBUG,
1283 qdev->ndev,
1284 "unmapping OAL area.\n");
1285 }
1286 pci_unmap_single(qdev->pdev,
1287 dma_unmap_addr(&tx_ring_desc->map[i],
1288 mapaddr),
1289 dma_unmap_len(&tx_ring_desc->map[i],
1290 maplen),
1291 PCI_DMA_TODEVICE);
1292 } else {
1293 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1294 "unmapping frag %d.\n", i);
1295 pci_unmap_page(qdev->pdev,
1296 dma_unmap_addr(&tx_ring_desc->map[i],
1297 mapaddr),
1298 dma_unmap_len(&tx_ring_desc->map[i],
1299 maplen), PCI_DMA_TODEVICE);
1300 }
1301 }
1302
1303 }
1304
1305 /* Map the buffers for this transmit. This will return
1306 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1307 */
1308 static int ql_map_send(struct ql_adapter *qdev,
1309 struct ob_mac_iocb_req *mac_iocb_ptr,
1310 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1311 {
1312 int len = skb_headlen(skb);
1313 dma_addr_t map;
1314 int frag_idx, err, map_idx = 0;
1315 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1316 int frag_cnt = skb_shinfo(skb)->nr_frags;
1317
1318 if (frag_cnt) {
1319 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1320 "frag_cnt = %d.\n", frag_cnt);
1321 }
1322 /*
1323 * Map the skb buffer first.
1324 */
1325 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327 err = pci_dma_mapping_error(qdev->pdev, map);
1328 if (err) {
1329 netif_err(qdev, tx_queued, qdev->ndev,
1330 "PCI mapping failed with error: %d\n", err);
1331
1332 return NETDEV_TX_BUSY;
1333 }
1334
1335 tbd->len = cpu_to_le32(len);
1336 tbd->addr = cpu_to_le64(map);
1337 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339 map_idx++;
1340
1341 /*
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1346 * in this list.
1347 */
1348 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350 tbd++;
1351 if (frag_idx == 6 && frag_cnt > 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1354 * look like this:
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1368 * etc...
1369 */
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372 sizeof(struct oal),
1373 PCI_DMA_TODEVICE);
1374 err = pci_dma_mapping_error(qdev->pdev, map);
1375 if (err) {
1376 netif_err(qdev, tx_queued, qdev->ndev,
1377 "PCI mapping outbound address list with error: %d\n",
1378 err);
1379 goto map_error;
1380 }
1381
1382 tbd->addr = cpu_to_le64(map);
1383 /*
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1387 */
1388 tbd->len =
1389 cpu_to_le32((sizeof(struct tx_buf_desc) *
1390 (frag_cnt - frag_idx)) | TX_DESC_C);
1391 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392 map);
1393 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394 sizeof(struct oal));
1395 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396 map_idx++;
1397 }
1398
1399 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1400 DMA_TO_DEVICE);
1401
1402 err = dma_mapping_error(&qdev->pdev->dev, map);
1403 if (err) {
1404 netif_err(qdev, tx_queued, qdev->ndev,
1405 "PCI mapping frags failed with error: %d.\n",
1406 err);
1407 goto map_error;
1408 }
1409
1410 tbd->addr = cpu_to_le64(map);
1411 tbd->len = cpu_to_le32(skb_frag_size(frag));
1412 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1413 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1414 skb_frag_size(frag));
1415
1416 }
1417 /* Save the number of segments we've mapped. */
1418 tx_ring_desc->map_cnt = map_idx;
1419 /* Terminate the last segment. */
1420 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1421 return NETDEV_TX_OK;
1422
1423 map_error:
1424 /*
1425 * If the first frag mapping failed, then i will be zero.
1426 * This causes the unmap of the skb->data area. Otherwise
1427 * we pass in the number of frags that mapped successfully
1428 * so they can be unmapped.
1429 */
1430 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1431 return NETDEV_TX_BUSY;
1432 }
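
/* Illustrative example (not part of the original driver): how fragments
 * are split between the IOCB and the OAL by ql_map_send() above. A send
 * with a linear head and 10 page frags maps like this:
 *
 *   iocb seg[0]    -> skb->data (linear head)
 *   iocb seg[1..6] -> frag[0..5]
 *   iocb seg[7]    -> DMA address of the OAL itself
 *   oal  seg[0..3] -> frag[6..9]
 *
 * The helper below just computes that split; with 7 or fewer frags
 * everything fits directly in the eight IOCB descriptors.
 */
static void ql_example_oal_split(int frag_cnt, int *frags_in_iocb,
				 int *frags_in_oal)
{
	if (frag_cnt > 7) {		/* head + 6 frags + OAL pointer */
		*frags_in_iocb = 6;
		*frags_in_oal = frag_cnt - 6;
	} else {			/* everything fits in the IOCB */
		*frags_in_iocb = frag_cnt;
		*frags_in_oal = 0;
	}
}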
1433
1434 /* Categorizing receive firmware frame errors */
1435 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1436 struct rx_ring *rx_ring)
1437 {
1438 struct nic_stats *stats = &qdev->nic_stats;
1439
1440 stats->rx_err_count++;
1441 rx_ring->rx_errors++;
1442
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1446 break;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1449 break;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1452 break;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1455 break;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1458 break;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
1461 default:
1462 break;
1463 }
1464 }
1465
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468 struct rx_ring *rx_ring,
1469 struct ib_mac_iocb_rsp *ib_mac_rsp,
1470 u32 length,
1471 u16 vlan_id)
1472 {
1473 struct sk_buff *skb;
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475 struct napi_struct *napi = &rx_ring->napi;
1476
1477 /* Frame error, so drop the packet. */
1478 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1479 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1480 put_page(lbq_desc->p.pg_chunk.page);
1481 return;
1482 }
1483 napi->dev = qdev->ndev;
1484
1485 skb = napi_get_frags(napi);
1486 if (!skb) {
1487 netif_err(qdev, drv, qdev->ndev,
1488 "Couldn't get an skb, exiting.\n");
1489 rx_ring->rx_dropped++;
1490 put_page(lbq_desc->p.pg_chunk.page);
1491 return;
1492 }
1493 prefetch(lbq_desc->p.pg_chunk.va);
1494 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1495 lbq_desc->p.pg_chunk.page,
1496 lbq_desc->p.pg_chunk.offset,
1497 length);
1498
1499 skb->len += length;
1500 skb->data_len += length;
1501 skb->truesize += length;
1502 skb_shinfo(skb)->nr_frags++;
1503
1504 rx_ring->rx_packets++;
1505 rx_ring->rx_bytes += length;
1506 skb->ip_summed = CHECKSUM_UNNECESSARY;
1507 skb_record_rx_queue(skb, rx_ring->cq_id);
1508 if (vlan_id != 0xffff)
1509 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1510 napi_gro_frags(napi);
1511 }
1512
1513 /* Process an inbound completion from an rx ring. */
1514 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1515 struct rx_ring *rx_ring,
1516 struct ib_mac_iocb_rsp *ib_mac_rsp,
1517 u32 length,
1518 u16 vlan_id)
1519 {
1520 struct net_device *ndev = qdev->ndev;
1521 struct sk_buff *skb = NULL;
1522 void *addr;
1523 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1524 struct napi_struct *napi = &rx_ring->napi;
1525
1526 skb = netdev_alloc_skb(ndev, length);
1527 if (!skb) {
1528 rx_ring->rx_dropped++;
1529 put_page(lbq_desc->p.pg_chunk.page);
1530 return;
1531 }
1532
1533 addr = lbq_desc->p.pg_chunk.va;
1534 prefetch(addr);
1535
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1539 goto err_out;
1540 }
1541
1542 /* The max framesize filter on this chip is set higher than
1543 * MTU since FCoE uses 2k frames.
1544 */
1545 if (skb->len > ndev->mtu + ETH_HLEN) {
1546 netif_err(qdev, drv, qdev->ndev,
1547 "Segment too small, dropping.\n");
1548 rx_ring->rx_dropped++;
1549 goto err_out;
1550 }
1551 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1552 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1553 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1554 length);
1555 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1556 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1557 length-ETH_HLEN);
1558 skb->len += length-ETH_HLEN;
1559 skb->data_len += length-ETH_HLEN;
1560 skb->truesize += length-ETH_HLEN;
1561
1562 rx_ring->rx_packets++;
1563 rx_ring->rx_bytes += skb->len;
1564 skb->protocol = eth_type_trans(skb, ndev);
1565 skb_checksum_none_assert(skb);
1566
1567 if ((ndev->features & NETIF_F_RXCSUM) &&
1568 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1569 /* TCP frame. */
1570 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1571 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1572 "TCP checksum done!\n");
1573 skb->ip_summed = CHECKSUM_UNNECESSARY;
1574 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1575 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1576 /* Unfragmented ipv4 UDP frame. */
1577 struct iphdr *iph =
1578 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1579 if (!(iph->frag_off &
1580 htons(IP_MF|IP_OFFSET))) {
1581 skb->ip_summed = CHECKSUM_UNNECESSARY;
1582 netif_printk(qdev, rx_status, KERN_DEBUG,
1583 qdev->ndev,
1584 "UDP checksum done!\n");
1585 }
1586 }
1587 }
1588
1589 skb_record_rx_queue(skb, rx_ring->cq_id);
1590 if (vlan_id != 0xffff)
1591 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1592 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1593 napi_gro_receive(napi, skb);
1594 else
1595 netif_receive_skb(skb);
1596 return;
1597 err_out:
1598 dev_kfree_skb_any(skb);
1599 put_page(lbq_desc->p.pg_chunk.page);
1600 }
1601
1602 /* Process an inbound completion from an rx ring. */
1603 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1604 struct rx_ring *rx_ring,
1605 struct ib_mac_iocb_rsp *ib_mac_rsp,
1606 u32 length,
1607 u16 vlan_id)
1608 {
1609 struct net_device *ndev = qdev->ndev;
1610 struct sk_buff *skb = NULL;
1611 struct sk_buff *new_skb = NULL;
1612 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1613
1614 skb = sbq_desc->p.skb;
1615 /* Allocate new_skb and copy */
1616 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1617 if (new_skb == NULL) {
1618 rx_ring->rx_dropped++;
1619 return;
1620 }
1621 skb_reserve(new_skb, NET_IP_ALIGN);
1622 memcpy(skb_put(new_skb, length), skb->data, length);
1623 skb = new_skb;
1624
1625 /* Frame error, so drop the packet. */
1626 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1627 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1628 dev_kfree_skb_any(skb);
1629 return;
1630 }
1631
1632 /* loopback self test for ethtool */
1633 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1634 ql_check_lb_frame(qdev, skb);
1635 dev_kfree_skb_any(skb);
1636 return;
1637 }
1638
1639 /* The max framesize filter on this chip is set higher than
1640 * MTU since FCoE uses 2k frames.
1641 */
1642 if (skb->len > ndev->mtu + ETH_HLEN) {
1643 dev_kfree_skb_any(skb);
1644 rx_ring->rx_dropped++;
1645 return;
1646 }
1647
1648 prefetch(skb->data);
1649 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1650 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1651 "%s Multicast.\n",
1652 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1653 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1654 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1655 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1656 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1657 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1658 }
1659 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1660 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661 "Promiscuous Packet.\n");
1662
1663 rx_ring->rx_packets++;
1664 rx_ring->rx_bytes += skb->len;
1665 skb->protocol = eth_type_trans(skb, ndev);
1666 skb_checksum_none_assert(skb);
1667
1668 /* If rx checksum is on, and there are no
1669 * csum or frame errors.
1670 */
1671 if ((ndev->features & NETIF_F_RXCSUM) &&
1672 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1673 /* TCP frame. */
1674 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1675 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1676 "TCP checksum done!\n");
1677 skb->ip_summed = CHECKSUM_UNNECESSARY;
1678 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1679 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1680 /* Unfragmented ipv4 UDP frame. */
1681 struct iphdr *iph = (struct iphdr *) skb->data;
1682 if (!(iph->frag_off &
1683 htons(IP_MF|IP_OFFSET))) {
1684 skb->ip_summed = CHECKSUM_UNNECESSARY;
1685 netif_printk(qdev, rx_status, KERN_DEBUG,
1686 qdev->ndev,
1687 "UDP checksum done!\n");
1688 }
1689 }
1690 }
1691
1692 skb_record_rx_queue(skb, rx_ring->cq_id);
1693 if (vlan_id != 0xffff)
1694 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1695 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1696 napi_gro_receive(&rx_ring->napi, skb);
1697 else
1698 netif_receive_skb(skb);
1699 }
1700
1701 static void ql_realign_skb(struct sk_buff *skb, int len)
1702 {
1703 void *temp_addr = skb->data;
1704
1705 /* Undo the skb_reserve(skb,32) we did before
1706 * giving to hardware, and realign data on
1707 * a 2-byte boundary.
1708 */
1709 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1710 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1711 skb_copy_to_linear_data(skb, temp_addr,
1712 (unsigned int)len);
1713 }
1714
1715 /*
1716 * This function builds an skb for the given inbound
1717 * completion. It will be rewritten for readability in the near
1718 * future, but for now it works well.
1719 */
1720 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1721 struct rx_ring *rx_ring,
1722 struct ib_mac_iocb_rsp *ib_mac_rsp)
1723 {
1724 struct bq_desc *lbq_desc;
1725 struct bq_desc *sbq_desc;
1726 struct sk_buff *skb = NULL;
1727 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1728 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1729
1730 /*
1731 * Handle the header buffer if present.
1732 */
1733 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1734 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1735 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1736 "Header of %d bytes in small buffer.\n", hdr_len);
1737 /*
1738 * Headers fit nicely into a small buffer.
1739 */
1740 sbq_desc = ql_get_curr_sbuf(rx_ring);
1741 pci_unmap_single(qdev->pdev,
1742 dma_unmap_addr(sbq_desc, mapaddr),
1743 dma_unmap_len(sbq_desc, maplen),
1744 PCI_DMA_FROMDEVICE);
1745 skb = sbq_desc->p.skb;
1746 ql_realign_skb(skb, hdr_len);
1747 skb_put(skb, hdr_len);
1748 sbq_desc->p.skb = NULL;
1749 }
1750
1751 /*
1752 * Handle the data buffer(s).
1753 */
1754 if (unlikely(!length)) { /* Is there data too? */
1755 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1756 "No Data buffer in this packet.\n");
1757 return skb;
1758 }
1759
1760 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1761 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1762 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1763 "Headers in small, data of %d bytes in small, combine them.\n",
1764 length);
1765 /*
1766 * Data is less than small buffer size so it's
1767 * stuffed in a small buffer.
1768 * For this case we append the data
1769 * from the "data" small buffer to the "header" small
1770 * buffer.
1771 */
1772 sbq_desc = ql_get_curr_sbuf(rx_ring);
1773 pci_dma_sync_single_for_cpu(qdev->pdev,
1774 dma_unmap_addr
1775 (sbq_desc, mapaddr),
1776 dma_unmap_len
1777 (sbq_desc, maplen),
1778 PCI_DMA_FROMDEVICE);
1779 memcpy(skb_put(skb, length),
1780 sbq_desc->p.skb->data, length);
1781 pci_dma_sync_single_for_device(qdev->pdev,
1782 dma_unmap_addr
1783 (sbq_desc,
1784 mapaddr),
1785 dma_unmap_len
1786 (sbq_desc,
1787 maplen),
1788 PCI_DMA_FROMDEVICE);
1789 } else {
1790 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1791 "%d bytes in a single small buffer.\n",
1792 length);
1793 sbq_desc = ql_get_curr_sbuf(rx_ring);
1794 skb = sbq_desc->p.skb;
1795 ql_realign_skb(skb, length);
1796 skb_put(skb, length);
1797 pci_unmap_single(qdev->pdev,
1798 dma_unmap_addr(sbq_desc,
1799 mapaddr),
1800 dma_unmap_len(sbq_desc,
1801 maplen),
1802 PCI_DMA_FROMDEVICE);
1803 sbq_desc->p.skb = NULL;
1804 }
1805 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1806 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1807 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1808 "Header in small, %d bytes in large. Chain large to small!\n",
1809 length);
1810 /*
1811 * The data is in a single large buffer. We
1812 * chain it to the header buffer's skb and let
1813 * it rip.
1814 */
1815 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1816 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1817 "Chaining page at offset = %d, for %d bytes to skb.\n",
1818 lbq_desc->p.pg_chunk.offset, length);
1819 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1820 lbq_desc->p.pg_chunk.offset,
1821 length);
1822 skb->len += length;
1823 skb->data_len += length;
1824 skb->truesize += length;
1825 } else {
1826 /*
1827 * The headers and data are in a single large buffer. We
1828 * copy it to a new skb and let it go. This can happen with
1829 * jumbo mtu on a non-TCP/UDP frame.
1830 */
1831 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1832 skb = netdev_alloc_skb(qdev->ndev, length);
1833 if (skb == NULL) {
1834 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1835 "No skb available, drop the packet.\n");
1836 return NULL;
1837 }
1838 pci_unmap_page(qdev->pdev,
1839 dma_unmap_addr(lbq_desc,
1840 mapaddr),
1841 dma_unmap_len(lbq_desc, maplen),
1842 PCI_DMA_FROMDEVICE);
1843 skb_reserve(skb, NET_IP_ALIGN);
1844 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1845 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1846 length);
1847 skb_fill_page_desc(skb, 0,
1848 lbq_desc->p.pg_chunk.page,
1849 lbq_desc->p.pg_chunk.offset,
1850 length);
1851 skb->len += length;
1852 skb->data_len += length;
1853 skb->truesize += length;
1854 length -= length;
1855 __pskb_pull_tail(skb,
1856 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1857 VLAN_ETH_HLEN : ETH_HLEN);
1858 }
1859 } else {
1860 /*
1861 * The data is in a chain of large buffers
1862 * pointed to by a small buffer. We loop
1863 * through and chain them to our small header
1864 * buffer's skb.
1865 * frags: There are 18 max frags and our small
1866 * buffer will hold 32 of them. The thing is,
1867 * we'll use 3 max for our 9000 byte jumbo
1868 * frames. If the MTU goes up we could
1869 * eventually be in trouble.
1870 */
1871 int size, i = 0;
1872 sbq_desc = ql_get_curr_sbuf(rx_ring);
1873 pci_unmap_single(qdev->pdev,
1874 dma_unmap_addr(sbq_desc, mapaddr),
1875 dma_unmap_len(sbq_desc, maplen),
1876 PCI_DMA_FROMDEVICE);
1877 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1878 /*
1879 * This is a non-TCP/UDP IP frame, so
1880 * the headers aren't split into a small
1881 * buffer. We have to use the small buffer
1882 * that contains our sg list as our skb to
1883 * send upstairs. Copy the sg list here to
1884 * a local buffer and use it to find the
1885 * pages to chain.
1886 */
1887 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1888 "%d bytes of headers & data in chain of large.\n",
1889 length);
1890 skb = sbq_desc->p.skb;
1891 sbq_desc->p.skb = NULL;
1892 skb_reserve(skb, NET_IP_ALIGN);
1893 }
1894 while (length > 0) {
1895 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1896 size = (length < rx_ring->lbq_buf_size) ? length :
1897 rx_ring->lbq_buf_size;
1898
1899 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1900 "Adding page %d to skb for %d bytes.\n",
1901 i, size);
1902 skb_fill_page_desc(skb, i,
1903 lbq_desc->p.pg_chunk.page,
1904 lbq_desc->p.pg_chunk.offset,
1905 size);
1906 skb->len += size;
1907 skb->data_len += size;
1908 skb->truesize += size;
1909 length -= size;
1910 i++;
1911 }
1912 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1913 VLAN_ETH_HLEN : ETH_HLEN);
1914 }
1915 return skb;
1916 }
1917
1918 /* Process an inbound completion from an rx ring. */
1919 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1920 struct rx_ring *rx_ring,
1921 struct ib_mac_iocb_rsp *ib_mac_rsp,
1922 u16 vlan_id)
1923 {
1924 struct net_device *ndev = qdev->ndev;
1925 struct sk_buff *skb = NULL;
1926
1927 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1928
1929 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1930 if (unlikely(!skb)) {
1931 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1932 "No skb available, drop packet.\n");
1933 rx_ring->rx_dropped++;
1934 return;
1935 }
1936
1937 /* Frame error, so drop the packet. */
1938 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1939 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1940 dev_kfree_skb_any(skb);
1941 return;
1942 }
1943
1944 /* The max framesize filter on this chip is set higher than
1945 * MTU since FCoE uses 2k frames.
1946 */
1947 if (skb->len > ndev->mtu + ETH_HLEN) {
1948 dev_kfree_skb_any(skb);
1949 rx_ring->rx_dropped++;
1950 return;
1951 }
1952
1953 /* loopback self test for ethtool */
1954 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1955 ql_check_lb_frame(qdev, skb);
1956 dev_kfree_skb_any(skb);
1957 return;
1958 }
1959
1960 prefetch(skb->data);
1961 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1962 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1963 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1964 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1965 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1966 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1967 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1968 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1969 rx_ring->rx_multicast++;
1970 }
1971 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1972 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1973 "Promiscuous Packet.\n");
1974 }
1975
1976 skb->protocol = eth_type_trans(skb, ndev);
1977 skb_checksum_none_assert(skb);
1978
1979 /* If rx checksum is on, and there are no
1980 * csum or frame errors.
1981 */
1982 if ((ndev->features & NETIF_F_RXCSUM) &&
1983 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1984 /* TCP frame. */
1985 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1986 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1987 "TCP checksum done!\n");
1988 skb->ip_summed = CHECKSUM_UNNECESSARY;
1989 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1990 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1991 /* Unfragmented ipv4 UDP frame. */
1992 struct iphdr *iph = (struct iphdr *) skb->data;
1993 if (!(iph->frag_off &
1994 htons(IP_MF|IP_OFFSET))) {
1995 skb->ip_summed = CHECKSUM_UNNECESSARY;
1996 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1997 "UDP checksum done!\n");
1998 }
1999 }
2000 }
2001
2002 rx_ring->rx_packets++;
2003 rx_ring->rx_bytes += skb->len;
2004 skb_record_rx_queue(skb, rx_ring->cq_id);
2005 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2006 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2007 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2008 napi_gro_receive(&rx_ring->napi, skb);
2009 else
2010 netif_receive_skb(skb);
2011 }
2012
2013 /* Process an inbound completion from an rx ring. */
2014 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015 struct rx_ring *rx_ring,
2016 struct ib_mac_iocb_rsp *ib_mac_rsp)
2017 {
2018 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2022
2023 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2024
2025 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026 /* The data and headers are split into
2027 * separate buffers.
2028 */
2029 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030 vlan_id);
2031 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032 /* The data fit in a single small buffer.
2033 * Allocate a new skb, copy the data and
2034 * return the buffer to the free pool.
2035 */
2036 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037 length, vlan_id);
2038 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041 /* TCP packet in a page chunk that's been checksummed.
2042 * Tack it on to our GRO skb and let it go.
2043 */
2044 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045 length, vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047 /* Non-TCP packet in a page chunk. Allocate an
2048 * skb, tack it on frags, and send it up.
2049 */
2050 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051 length, vlan_id);
2052 } else {
2053 /* Non-TCP/UDP large frames that span multiple buffers
2054 * can be processed correctly by the split frame logic.
2055 */
2056 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2057 vlan_id);
2058 }
2059
2060 return (unsigned long)length;
2061 }
2062
2063 /* Process an outbound completion from an rx ring. */
2064 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2065 struct ob_mac_iocb_rsp *mac_rsp)
2066 {
2067 struct tx_ring *tx_ring;
2068 struct tx_ring_desc *tx_ring_desc;
2069
2070 QL_DUMP_OB_MAC_RSP(mac_rsp);
2071 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2072 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2073 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2074 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2075 tx_ring->tx_packets++;
2076 dev_kfree_skb(tx_ring_desc->skb);
2077 tx_ring_desc->skb = NULL;
2078
2079 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2080 OB_MAC_IOCB_RSP_S |
2081 OB_MAC_IOCB_RSP_L |
2082 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2083 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2084 netif_warn(qdev, tx_done, qdev->ndev,
2085 "Total descriptor length did not match transfer length.\n");
2086 }
2087 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2088 netif_warn(qdev, tx_done, qdev->ndev,
2089 "Frame too short to be valid, not sent.\n");
2090 }
2091 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2092 netif_warn(qdev, tx_done, qdev->ndev,
2093 "Frame too long, but sent anyway.\n");
2094 }
2095 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2096 netif_warn(qdev, tx_done, qdev->ndev,
2097 "PCI backplane error. Frame not sent.\n");
2098 }
2099 }
2100 atomic_inc(&tx_ring->tx_count);
2101 }
2102
2103 /* Fire up a handler to reset the MPI processor. */
2104 void ql_queue_fw_error(struct ql_adapter *qdev)
2105 {
2106 ql_link_off(qdev);
2107 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2108 }
2109
2110 void ql_queue_asic_error(struct ql_adapter *qdev)
2111 {
2112 ql_link_off(qdev);
2113 ql_disable_interrupts(qdev);
2114 /* Clear adapter up bit to signal the recovery
2115 * process that it shouldn't kill the reset worker
2116 * thread
2117 */
2118 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2119 /* Set the asic recovery bit to tell the reset process that we are
2120 * in fatal error recovery rather than a normal close
2121 */
2122 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2123 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2124 }
2125
2126 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2127 struct ib_ae_iocb_rsp *ib_ae_rsp)
2128 {
2129 switch (ib_ae_rsp->event) {
2130 case MGMT_ERR_EVENT:
2131 netif_err(qdev, rx_err, qdev->ndev,
2132 "Management Processor Fatal Error.\n");
2133 ql_queue_fw_error(qdev);
2134 return;
2135
2136 case CAM_LOOKUP_ERR_EVENT:
2137 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2138 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2139 ql_queue_asic_error(qdev);
2140 return;
2141
2142 case SOFT_ECC_ERROR_EVENT:
2143 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2144 ql_queue_asic_error(qdev);
2145 break;
2146
2147 case PCI_ERR_ANON_BUF_RD:
2148 netdev_err(qdev->ndev, "PCI error occurred when reading "
2149 "anonymous buffers from rx_ring %d.\n",
2150 ib_ae_rsp->q_id);
2151 ql_queue_asic_error(qdev);
2152 break;
2153
2154 default:
2155 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2156 ib_ae_rsp->event);
2157 ql_queue_asic_error(qdev);
2158 break;
2159 }
2160 }
2161
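/* Drain TX-completion ("outbound") IOCBs from a completion queue and,
 * once the associated tx_ring is at least 25% free again, wake its
 * stopped subqueue.
 */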
2162 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2163 {
2164 struct ql_adapter *qdev = rx_ring->qdev;
2165 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2166 struct ob_mac_iocb_rsp *net_rsp = NULL;
2167 int count = 0;
2168
2169 struct tx_ring *tx_ring;
2170 /* While there are entries in the completion queue. */
2171 while (prod != rx_ring->cnsmr_idx) {
2172
2173 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2174 "cq_id = %d, prod = %d, cnsmr = %d.\n",
2175 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2176
2177 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
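/* Don't read the IOCB contents until after the producer index
 * read that made this entry visible.
 */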
2178 rmb();
2179 switch (net_rsp->opcode) {
2180
2181 case OPCODE_OB_MAC_TSO_IOCB:
2182 case OPCODE_OB_MAC_IOCB:
2183 ql_process_mac_tx_intr(qdev, net_rsp);
2184 break;
2185 default:
2186 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2187 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2188 net_rsp->opcode);
2189 }
2190 count++;
2191 ql_update_cq(rx_ring);
2192 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2193 }
2194 if (!net_rsp)
2195 return 0;
2196 ql_write_cq_idx(rx_ring);
2197 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2198 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2199 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2200 /*
2201 * The queue got stopped because the tx_ring was full.
2202 * Wake it up, because it's now at least 25% empty.
2203 */
2204 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2205 }
2206
2207 return count;
2208 }
2209
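/* Process up to "budget" inbound (RX and async event) IOCBs from this
 * completion queue, then replenish the buffer queues and update the
 * consumer index.
 */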
2210 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2211 {
2212 struct ql_adapter *qdev = rx_ring->qdev;
2213 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2214 struct ql_net_rsp_iocb *net_rsp;
2215 int count = 0;
2216
2217 /* While there are entries in the completion queue. */
2218 while (prod != rx_ring->cnsmr_idx) {
2219
2220 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221 "cq_id = %d, prod = %d, cnsmr = %d.\n",
2222 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2223
2224 net_rsp = rx_ring->curr_entry;
2225 rmb();
2226 switch (net_rsp->opcode) {
2227 case OPCODE_IB_MAC_IOCB:
2228 ql_process_mac_rx_intr(qdev, rx_ring,
2229 (struct ib_mac_iocb_rsp *)
2230 net_rsp);
2231 break;
2232
2233 case OPCODE_IB_AE_IOCB:
2234 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2235 net_rsp);
2236 break;
2237 default:
2238 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2239 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2240 net_rsp->opcode);
2241 break;
2242 }
2243 count++;
2244 ql_update_cq(rx_ring);
2245 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2246 if (count == budget)
2247 break;
2248 }
2249 ql_update_buffer_queues(qdev, rx_ring);
2250 ql_write_cq_idx(rx_ring);
2251 return count;
2252 }
2253
2254 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2255 {
2256 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2257 struct ql_adapter *qdev = rx_ring->qdev;
2258 struct rx_ring *trx_ring;
2259 int i, work_done = 0;
2260 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2261
2262 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2263 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2264
2265 /* Service the TX rings first. They start
2266 * right after the RSS rings. */
2267 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2268 trx_ring = &qdev->rx_ring[i];
2269 /* If this TX completion ring belongs to this vector and
2270 * it's not empty then service it.
2271 */
2272 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2273 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2274 trx_ring->cnsmr_idx)) {
2275 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2276 "%s: Servicing TX completion ring %d.\n",
2277 __func__, trx_ring->cq_id);
2278 ql_clean_outbound_rx_ring(trx_ring);
2279 }
2280 }
2281
2282 /*
2283 * Now service the RSS ring if it's active.
2284 */
2285 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2286 rx_ring->cnsmr_idx) {
2287 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2288 "%s: Servicing RX completion ring %d.\n",
2289 __func__, rx_ring->cq_id);
2290 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2291 }
2292
2293 if (work_done < budget) {
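/* All pending work fit in this poll: leave polling mode and
 * re-arm the completion interrupt for this vector.
 */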
2294 napi_complete(napi);
2295 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2296 }
2297 return work_done;
2298 }
2299
2300 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2301 {
2302 struct ql_adapter *qdev = netdev_priv(ndev);
2303
2304 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2305 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2306 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2307 } else {
2308 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2309 }
2310 }
2311
2312 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2313 netdev_features_t features)
2314 {
2315 /*
2316 * Since there is no support for separate rx/tx vlan accel
2317 * enable/disable, make sure the tx flag is always in the same state as rx.
2318 */
2319 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2320 features |= NETIF_F_HW_VLAN_CTAG_TX;
2321 else
2322 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2323
2324 return features;
2325 }
2326
2327 static int qlge_set_features(struct net_device *ndev,
2328 netdev_features_t features)
2329 {
2330 netdev_features_t changed = ndev->features ^ features;
2331
2332 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2333 qlge_vlan_mode(ndev, features);
2334
2335 return 0;
2336 }
2337
2338 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2339 {
2340 u32 enable_bit = MAC_ADDR_E;
2341 int err;
2342
2343 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2344 MAC_ADDR_TYPE_VLAN, vid);
2345 if (err)
2346 netif_err(qdev, ifup, qdev->ndev,
2347 "Failed to init vlan address.\n");
2348 return err;
2349 }
2350
2351 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2352 {
2353 struct ql_adapter *qdev = netdev_priv(ndev);
2354 int status;
2355 int err;
2356
2357 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2358 if (status)
2359 return status;
2360
2361 err = __qlge_vlan_rx_add_vid(qdev, vid);
2362 set_bit(vid, qdev->active_vlans);
2363
2364 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2365
2366 return err;
2367 }
2368
2369 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2370 {
2371 u32 enable_bit = 0;
2372 int err;
2373
2374 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2375 MAC_ADDR_TYPE_VLAN, vid);
2376 if (err)
2377 netif_err(qdev, ifup, qdev->ndev,
2378 "Failed to clear vlan address.\n");
2379 return err;
2380 }
2381
2382 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2383 {
2384 struct ql_adapter *qdev = netdev_priv(ndev);
2385 int status;
2386 int err;
2387
2388 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2389 if (status)
2390 return status;
2391
2392 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2393 clear_bit(vid, qdev->active_vlans);
2394
2395 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2396
2397 return err;
2398 }
2399
2400 static void qlge_restore_vlan(struct ql_adapter *qdev)
2401 {
2402 int status;
2403 u16 vid;
2404
2405 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2406 if (status)
2407 return;
2408
2409 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2410 __qlge_vlan_rx_add_vid(qdev, vid);
2411
2412 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2413 }
2414
2415 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2416 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2417 {
2418 struct rx_ring *rx_ring = dev_id;
2419 napi_schedule(&rx_ring->napi);
2420 return IRQ_HANDLED;
2421 }
2422
2423 /* This handles a fatal error, MPI activity, and the default
2424 * rx_ring in an MSI-X multiple vector environment.
2425 * In an MSI/Legacy environment it also processes the rest of
2426 * the rx_rings.
2427 */
2428 static irqreturn_t qlge_isr(int irq, void *dev_id)
2429 {
2430 struct rx_ring *rx_ring = dev_id;
2431 struct ql_adapter *qdev = rx_ring->qdev;
2432 struct intr_context *intr_context = &qdev->intr_context[0];
2433 u32 var;
2434 int work_done = 0;
2435
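/* irq_cnt is non-zero while this vector's completion interrupt is
 * disabled, so an interrupt arriving now on a shared line cannot be
 * ours.
 */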
2436 spin_lock(&qdev->hw_lock);
2437 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2438 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2439 "Shared Interrupt, Not ours!\n");
2440 spin_unlock(&qdev->hw_lock);
2441 return IRQ_NONE;
2442 }
2443 spin_unlock(&qdev->hw_lock);
2444
2445 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2446
2447 /*
2448 * Check for fatal error.
2449 */
2450 if (var & STS_FE) {
2451 ql_queue_asic_error(qdev);
2452 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2453 var = ql_read32(qdev, ERR_STS);
2454 netdev_err(qdev->ndev, "Resetting chip. "
2455 "Error Status Register = 0x%x\n", var);
2456 return IRQ_HANDLED;
2457 }
2458
2459 /*
2460 * Check MPI processor activity.
2461 */
2462 if ((var & STS_PI) &&
2463 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2464 /*
2465 * We've got an async event or mailbox completion.
2466 * Handle it and clear the source of the interrupt.
2467 */
2468 netif_err(qdev, intr, qdev->ndev,
2469 "Got MPI processor interrupt.\n");
2470 ql_disable_completion_interrupt(qdev, intr_context->intr);
2471 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2472 queue_delayed_work_on(smp_processor_id(),
2473 qdev->workqueue, &qdev->mpi_work, 0);
2474 work_done++;
2475 }
2476
2477 /*
2478 * Get the bit-mask that shows the active queues for this
2479 * pass. Compare it to the queues that this irq services
2480 * and call napi if there's a match.
2481 */
2482 var = ql_read32(qdev, ISR1);
2483 if (var & intr_context->irq_mask) {
2484 netif_info(qdev, intr, qdev->ndev,
2485 "Waking handler for rx_ring[0].\n");
2486 ql_disable_completion_interrupt(qdev, intr_context->intr);
2487 napi_schedule(&rx_ring->napi);
2488 work_done++;
2489 }
2490 ql_enable_completion_interrupt(qdev, intr_context->intr);
2491 return work_done ? IRQ_HANDLED : IRQ_NONE;
2492 }
2493
2494 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2495 {
2496
2497 if (skb_is_gso(skb)) {
2498 int err;
2499 if (skb_header_cloned(skb)) {
2500 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2501 if (err)
2502 return err;
2503 }
2504
2505 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2506 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2507 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2508 mac_iocb_ptr->total_hdrs_len =
2509 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2510 mac_iocb_ptr->net_trans_offset =
2511 cpu_to_le16(skb_network_offset(skb) |
2512 skb_transport_offset(skb)
2513 << OB_MAC_TRANSPORT_HDR_SHIFT);
2514 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2515 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
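/* Seed the TCP checksum with the pseudo-header sum (length 0) so the
 * hardware can complete the checksum for each segment it generates.
 */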
2516 if (likely(skb->protocol == htons(ETH_P_IP))) {
2517 struct iphdr *iph = ip_hdr(skb);
2518 iph->check = 0;
2519 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2520 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2521 iph->daddr, 0,
2522 IPPROTO_TCP,
2523 0);
2524 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2525 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2526 tcp_hdr(skb)->check =
2527 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2528 &ipv6_hdr(skb)->daddr,
2529 0, IPPROTO_TCP, 0);
2530 }
2531 return 1;
2532 }
2533 return 0;
2534 }
2535
2536 static void ql_hw_csum_setup(struct sk_buff *skb,
2537 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2538 {
2539 int len;
2540 struct iphdr *iph = ip_hdr(skb);
2541 __sum16 *check;
2542 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2543 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2544 mac_iocb_ptr->net_trans_offset =
2545 cpu_to_le16(skb_network_offset(skb) |
2546 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2547
2548 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2549 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2550 if (likely(iph->protocol == IPPROTO_TCP)) {
2551 check = &(tcp_hdr(skb)->check);
2552 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2553 mac_iocb_ptr->total_hdrs_len =
2554 cpu_to_le16(skb_transport_offset(skb) +
2555 (tcp_hdr(skb)->doff << 2));
2556 } else {
2557 check = &(udp_hdr(skb)->check);
2558 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2559 mac_iocb_ptr->total_hdrs_len =
2560 cpu_to_le16(skb_transport_offset(skb) +
2561 sizeof(struct udphdr));
2562 }
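/* Store the pseudo-header sum in the transport checksum field; the
 * hardware finishes the checksum over the payload.
 */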
2563 *check = ~csum_tcpudp_magic(iph->saddr,
2564 iph->daddr, len, iph->protocol, 0);
2565 }
2566
2567 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2568 {
2569 struct tx_ring_desc *tx_ring_desc;
2570 struct ob_mac_iocb_req *mac_iocb_ptr;
2571 struct ql_adapter *qdev = netdev_priv(ndev);
2572 int tso;
2573 struct tx_ring *tx_ring;
2574 u32 tx_ring_idx = (u32) skb->queue_mapping;
2575
2576 tx_ring = &qdev->tx_ring[tx_ring_idx];
2577
2578 if (skb_padto(skb, ETH_ZLEN))
2579 return NETDEV_TX_OK;
2580
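/* Require at least two free descriptors before accepting another
 * frame; otherwise stop the subqueue and report busy.
 */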
2581 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2582 netif_info(qdev, tx_queued, qdev->ndev,
2583 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2584 __func__, tx_ring_idx);
2585 netif_stop_subqueue(ndev, tx_ring->wq_id);
2586 tx_ring->tx_errors++;
2587 return NETDEV_TX_BUSY;
2588 }
2589 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2590 mac_iocb_ptr = tx_ring_desc->queue_entry;
2591 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2592
2593 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2594 mac_iocb_ptr->tid = tx_ring_desc->index;
2595 /* We use the upper 32-bits to store the tx queue for this IO.
2596 * When we get the completion we can use it to establish the context.
2597 */
2598 mac_iocb_ptr->txq_idx = tx_ring_idx;
2599 tx_ring_desc->skb = skb;
2600
2601 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2602
2603 if (vlan_tx_tag_present(skb)) {
2604 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2605 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2606 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2607 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2608 }
2609 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2610 if (tso < 0) {
2611 dev_kfree_skb_any(skb);
2612 return NETDEV_TX_OK;
2613 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2614 ql_hw_csum_setup(skb,
2615 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2616 }
2617 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2618 NETDEV_TX_OK) {
2619 netif_err(qdev, tx_queued, qdev->ndev,
2620 "Could not map the segments.\n");
2621 tx_ring->tx_errors++;
2622 return NETDEV_TX_BUSY;
2623 }
2624 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2625 tx_ring->prod_idx++;
2626 if (tx_ring->prod_idx == tx_ring->wq_len)
2627 tx_ring->prod_idx = 0;
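/* Make sure the IOCB is fully written to memory before the doorbell
 * write below makes it visible to the hardware.
 */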
2628 wmb();
2629
2630 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2631 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2632 "tx queued, slot %d, len %d\n",
2633 tx_ring->prod_idx, skb->len);
2634
2635 atomic_dec(&tx_ring->tx_count);
2636
2637 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2638 netif_stop_subqueue(ndev, tx_ring->wq_id);
2639 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2640 /*
2641 * The queue got stopped because the tx_ring was full.
2642 * Wake it up, because it's now at least 25% empty.
2643 */
2644 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2645 }
2646 return NETDEV_TX_OK;
2647 }
2648
2649
2650 static void ql_free_shadow_space(struct ql_adapter *qdev)
2651 {
2652 if (qdev->rx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->rx_ring_shadow_reg_area,
2656 qdev->rx_ring_shadow_reg_dma);
2657 qdev->rx_ring_shadow_reg_area = NULL;
2658 }
2659 if (qdev->tx_ring_shadow_reg_area) {
2660 pci_free_consistent(qdev->pdev,
2661 PAGE_SIZE,
2662 qdev->tx_ring_shadow_reg_area,
2663 qdev->tx_ring_shadow_reg_dma);
2664 qdev->tx_ring_shadow_reg_area = NULL;
2665 }
2666 }
2667
2668 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2669 {
2670 qdev->rx_ring_shadow_reg_area =
2671 pci_alloc_consistent(qdev->pdev,
2672 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2673 if (qdev->rx_ring_shadow_reg_area == NULL) {
2674 netif_err(qdev, ifup, qdev->ndev,
2675 "Allocation of RX shadow space failed.\n");
2676 return -ENOMEM;
2677 }
2678 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2679 qdev->tx_ring_shadow_reg_area =
2680 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2681 &qdev->tx_ring_shadow_reg_dma);
2682 if (qdev->tx_ring_shadow_reg_area == NULL) {
2683 netif_err(qdev, ifup, qdev->ndev,
2684 "Allocation of TX shadow space failed.\n");
2685 goto err_wqp_sh_area;
2686 }
2687 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2688 return 0;
2689
2690 err_wqp_sh_area:
2691 pci_free_consistent(qdev->pdev,
2692 PAGE_SIZE,
2693 qdev->rx_ring_shadow_reg_area,
2694 qdev->rx_ring_shadow_reg_dma);
2695 return -ENOMEM;
2696 }
2697
2698 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2699 {
2700 struct tx_ring_desc *tx_ring_desc;
2701 int i;
2702 struct ob_mac_iocb_req *mac_iocb_ptr;
2703
2704 mac_iocb_ptr = tx_ring->wq_base;
2705 tx_ring_desc = tx_ring->q;
2706 for (i = 0; i < tx_ring->wq_len; i++) {
2707 tx_ring_desc->index = i;
2708 tx_ring_desc->skb = NULL;
2709 tx_ring_desc->queue_entry = mac_iocb_ptr;
2710 mac_iocb_ptr++;
2711 tx_ring_desc++;
2712 }
2713 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2714 }
2715
2716 static void ql_free_tx_resources(struct ql_adapter *qdev,
2717 struct tx_ring *tx_ring)
2718 {
2719 if (tx_ring->wq_base) {
2720 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2721 tx_ring->wq_base, tx_ring->wq_base_dma);
2722 tx_ring->wq_base = NULL;
2723 }
2724 kfree(tx_ring->q);
2725 tx_ring->q = NULL;
2726 }
2727
2728 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2729 struct tx_ring *tx_ring)
2730 {
2731 tx_ring->wq_base =
2732 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2733 &tx_ring->wq_base_dma);
2734
2735 if ((tx_ring->wq_base == NULL) ||
2736 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2737 goto pci_alloc_err;
2738
2739 tx_ring->q =
2740 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2741 if (tx_ring->q == NULL)
2742 goto err;
2743
2744 return 0;
2745 err:
2746 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2747 tx_ring->wq_base, tx_ring->wq_base_dma);
2748 tx_ring->wq_base = NULL;
2749 pci_alloc_err:
2750 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2751 return -ENOMEM;
2752 }
2753
2754 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2755 {
2756 struct bq_desc *lbq_desc;
2757
2758 uint32_t curr_idx, clean_idx;
2759
2760 curr_idx = rx_ring->lbq_curr_idx;
2761 clean_idx = rx_ring->lbq_clean_idx;
2762 while (curr_idx != clean_idx) {
2763 lbq_desc = &rx_ring->lbq[curr_idx];
2764
2765 if (lbq_desc->p.pg_chunk.last_flag) {
2766 pci_unmap_page(qdev->pdev,
2767 lbq_desc->p.pg_chunk.map,
2768 ql_lbq_block_size(qdev),
2769 PCI_DMA_FROMDEVICE);
2770 lbq_desc->p.pg_chunk.last_flag = 0;
2771 }
2772
2773 put_page(lbq_desc->p.pg_chunk.page);
2774 lbq_desc->p.pg_chunk.page = NULL;
2775
2776 if (++curr_idx == rx_ring->lbq_len)
2777 curr_idx = 0;
2778
2779 }
2780 }
2781
2782 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2783 {
2784 int i;
2785 struct bq_desc *sbq_desc;
2786
2787 for (i = 0; i < rx_ring->sbq_len; i++) {
2788 sbq_desc = &rx_ring->sbq[i];
2789 if (sbq_desc == NULL) {
2790 netif_err(qdev, ifup, qdev->ndev,
2791 "sbq_desc %d is NULL.\n", i);
2792 return;
2793 }
2794 if (sbq_desc->p.skb) {
2795 pci_unmap_single(qdev->pdev,
2796 dma_unmap_addr(sbq_desc, mapaddr),
2797 dma_unmap_len(sbq_desc, maplen),
2798 PCI_DMA_FROMDEVICE);
2799 dev_kfree_skb(sbq_desc->p.skb);
2800 sbq_desc->p.skb = NULL;
2801 }
2802 }
2803 }
2804
2805 /* Free all large and small rx buffers associated
2806 * with the completion queues for this device.
2807 */
2808 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2809 {
2810 int i;
2811 struct rx_ring *rx_ring;
2812
2813 for (i = 0; i < qdev->rx_ring_count; i++) {
2814 rx_ring = &qdev->rx_ring[i];
2815 if (rx_ring->lbq)
2816 ql_free_lbq_buffers(qdev, rx_ring);
2817 if (rx_ring->sbq)
2818 ql_free_sbq_buffers(qdev, rx_ring);
2819 }
2820 }
2821
2822 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2823 {
2824 struct rx_ring *rx_ring;
2825 int i;
2826
2827 for (i = 0; i < qdev->rx_ring_count; i++) {
2828 rx_ring = &qdev->rx_ring[i];
2829 if (rx_ring->type != TX_Q)
2830 ql_update_buffer_queues(qdev, rx_ring);
2831 }
2832 }
2833
2834 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2835 struct rx_ring *rx_ring)
2836 {
2837 int i;
2838 struct bq_desc *lbq_desc;
2839 __le64 *bq = rx_ring->lbq_base;
2840
2841 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2842 for (i = 0; i < rx_ring->lbq_len; i++) {
2843 lbq_desc = &rx_ring->lbq[i];
2844 memset(lbq_desc, 0, sizeof(*lbq_desc));
2845 lbq_desc->index = i;
2846 lbq_desc->addr = bq;
2847 bq++;
2848 }
2849 }
2850
2851 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2852 struct rx_ring *rx_ring)
2853 {
2854 int i;
2855 struct bq_desc *sbq_desc;
2856 __le64 *bq = rx_ring->sbq_base;
2857
2858 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2859 for (i = 0; i < rx_ring->sbq_len; i++) {
2860 sbq_desc = &rx_ring->sbq[i];
2861 memset(sbq_desc, 0, sizeof(*sbq_desc));
2862 sbq_desc->index = i;
2863 sbq_desc->addr = bq;
2864 bq++;
2865 }
2866 }
2867
2868 static void ql_free_rx_resources(struct ql_adapter *qdev,
2869 struct rx_ring *rx_ring)
2870 {
2871 /* Free the small buffer queue. */
2872 if (rx_ring->sbq_base) {
2873 pci_free_consistent(qdev->pdev,
2874 rx_ring->sbq_size,
2875 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2876 rx_ring->sbq_base = NULL;
2877 }
2878
2879 /* Free the small buffer queue control blocks. */
2880 kfree(rx_ring->sbq);
2881 rx_ring->sbq = NULL;
2882
2883 /* Free the large buffer queue. */
2884 if (rx_ring->lbq_base) {
2885 pci_free_consistent(qdev->pdev,
2886 rx_ring->lbq_size,
2887 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2888 rx_ring->lbq_base = NULL;
2889 }
2890
2891 /* Free the large buffer queue control blocks. */
2892 kfree(rx_ring->lbq);
2893 rx_ring->lbq = NULL;
2894
2895 /* Free the rx queue. */
2896 if (rx_ring->cq_base) {
2897 pci_free_consistent(qdev->pdev,
2898 rx_ring->cq_size,
2899 rx_ring->cq_base, rx_ring->cq_base_dma);
2900 rx_ring->cq_base = NULL;
2901 }
2902 }
2903
2904 /* Allocate queues and buffers for this completion queue based
2905 * on the values in the parameter structure. */
2906 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2907 struct rx_ring *rx_ring)
2908 {
2909
2910 /*
2911 * Allocate the completion queue for this rx_ring.
2912 */
2913 rx_ring->cq_base =
2914 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2915 &rx_ring->cq_base_dma);
2916
2917 if (rx_ring->cq_base == NULL) {
2918 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2919 return -ENOMEM;
2920 }
2921
2922 if (rx_ring->sbq_len) {
2923 /*
2924 * Allocate small buffer queue.
2925 */
2926 rx_ring->sbq_base =
2927 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2928 &rx_ring->sbq_base_dma);
2929
2930 if (rx_ring->sbq_base == NULL) {
2931 netif_err(qdev, ifup, qdev->ndev,
2932 "Small buffer queue allocation failed.\n");
2933 goto err_mem;
2934 }
2935
2936 /*
2937 * Allocate small buffer queue control blocks.
2938 */
2939 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2940 sizeof(struct bq_desc),
2941 GFP_KERNEL);
2942 if (rx_ring->sbq == NULL)
2943 goto err_mem;
2944
2945 ql_init_sbq_ring(qdev, rx_ring);
2946 }
2947
2948 if (rx_ring->lbq_len) {
2949 /*
2950 * Allocate large buffer queue.
2951 */
2952 rx_ring->lbq_base =
2953 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2954 &rx_ring->lbq_base_dma);
2955
2956 if (rx_ring->lbq_base == NULL) {
2957 netif_err(qdev, ifup, qdev->ndev,
2958 "Large buffer queue allocation failed.\n");
2959 goto err_mem;
2960 }
2961 /*
2962 * Allocate large buffer queue control blocks.
2963 */
2964 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2965 sizeof(struct bq_desc),
2966 GFP_KERNEL);
2967 if (rx_ring->lbq == NULL)
2968 goto err_mem;
2969
2970 ql_init_lbq_ring(qdev, rx_ring);
2971 }
2972
2973 return 0;
2974
2975 err_mem:
2976 ql_free_rx_resources(qdev, rx_ring);
2977 return -ENOMEM;
2978 }
2979
2980 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2981 {
2982 struct tx_ring *tx_ring;
2983 struct tx_ring_desc *tx_ring_desc;
2984 int i, j;
2985
2986 /*
2987 * Loop through all queues and free
2988 * any resources.
2989 */
2990 for (j = 0; j < qdev->tx_ring_count; j++) {
2991 tx_ring = &qdev->tx_ring[j];
2992 for (i = 0; i < tx_ring->wq_len; i++) {
2993 tx_ring_desc = &tx_ring->q[i];
2994 if (tx_ring_desc && tx_ring_desc->skb) {
2995 netif_err(qdev, ifdown, qdev->ndev,
2996 "Freeing lost SKB %p, from queue %d, index %d.\n",
2997 tx_ring_desc->skb, j,
2998 tx_ring_desc->index);
2999 ql_unmap_send(qdev, tx_ring_desc,
3000 tx_ring_desc->map_cnt);
3001 dev_kfree_skb(tx_ring_desc->skb);
3002 tx_ring_desc->skb = NULL;
3003 }
3004 }
3005 }
3006 }
3007
3008 static void ql_free_mem_resources(struct ql_adapter *qdev)
3009 {
3010 int i;
3011
3012 for (i = 0; i < qdev->tx_ring_count; i++)
3013 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3014 for (i = 0; i < qdev->rx_ring_count; i++)
3015 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3016 ql_free_shadow_space(qdev);
3017 }
3018
3019 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3020 {
3021 int i;
3022
3023 /* Allocate space for our shadow registers and such. */
3024 if (ql_alloc_shadow_space(qdev))
3025 return -ENOMEM;
3026
3027 for (i = 0; i < qdev->rx_ring_count; i++) {
3028 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3029 netif_err(qdev, ifup, qdev->ndev,
3030 "RX resource allocation failed.\n");
3031 goto err_mem;
3032 }
3033 }
3034 /* Allocate tx queue resources */
3035 for (i = 0; i < qdev->tx_ring_count; i++) {
3036 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3037 netif_err(qdev, ifup, qdev->ndev,
3038 "TX resource allocation failed.\n");
3039 goto err_mem;
3040 }
3041 }
3042 return 0;
3043
3044 err_mem:
3045 ql_free_mem_resources(qdev);
3046 return -ENOMEM;
3047 }
3048
3049 /* Set up the rx ring control block and pass it to the chip.
3050 * The control block is defined as
3051 * "Completion Queue Initialization Control Block", or cqicb.
3052 */
3053 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3054 {
3055 struct cqicb *cqicb = &rx_ring->cqicb;
3056 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3057 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3059 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3060 void __iomem *doorbell_area =
3061 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3062 int err = 0;
3063 u16 bq_len;
3064 u64 tmp;
3065 __le64 *base_indirect_ptr;
3066 int page_entries;
3067
3068 /* Set up the shadow registers for this ring. */
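/* The per-ring shadow area holds an 8-byte producer index mirror,
 * followed by the lbq and then sbq indirection (doorbell page address)
 * lists.
 */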
3069 rx_ring->prod_idx_sh_reg = shadow_reg;
3070 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3071 *rx_ring->prod_idx_sh_reg = 0;
3072 shadow_reg += sizeof(u64);
3073 shadow_reg_dma += sizeof(u64);
3074 rx_ring->lbq_base_indirect = shadow_reg;
3075 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3076 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3077 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3078 rx_ring->sbq_base_indirect = shadow_reg;
3079 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3080
3081 /* PCI doorbell mem area + 0x00 for consumer index register */
3082 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3083 rx_ring->cnsmr_idx = 0;
3084 rx_ring->curr_entry = rx_ring->cq_base;
3085
3086 /* PCI doorbell mem area + 0x04 for valid register */
3087 rx_ring->valid_db_reg = doorbell_area + 0x04;
3088
3089 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3090 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3091
3092 /* PCI doorbell mem area + 0x1c */
3093 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3094
3095 memset((void *)cqicb, 0, sizeof(struct cqicb));
3096 cqicb->msix_vect = rx_ring->irq;
3097
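/* The length fields are 16 bits wide, so a queue length of 65536 is
 * encoded as 0.
 */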
3098 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3099 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3100
3101 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3102
3103 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3104
3105 /*
3106 * Set up the control block load flags.
3107 */
3108 cqicb->flags = FLAGS_LC | /* Load queue base address */
3109 FLAGS_LV | /* Load MSI-X vector */
3110 FLAGS_LI; /* Load irq delay values */
3111 if (rx_ring->lbq_len) {
3112 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3113 tmp = (u64)rx_ring->lbq_base_dma;
3114 base_indirect_ptr = rx_ring->lbq_base_indirect;
3115 page_entries = 0;
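/* List the DMA address of each doorbell-page-sized chunk of the large
 * buffer queue in the indirection table.
 */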
3116 do {
3117 *base_indirect_ptr = cpu_to_le64(tmp);
3118 tmp += DB_PAGE_SIZE;
3119 base_indirect_ptr++;
3120 page_entries++;
3121 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3122 cqicb->lbq_addr =
3123 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3124 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3125 (u16) rx_ring->lbq_buf_size;
3126 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3127 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3128 (u16) rx_ring->lbq_len;
3129 cqicb->lbq_len = cpu_to_le16(bq_len);
3130 rx_ring->lbq_prod_idx = 0;
3131 rx_ring->lbq_curr_idx = 0;
3132 rx_ring->lbq_clean_idx = 0;
3133 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3134 }
3135 if (rx_ring->sbq_len) {
3136 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3137 tmp = (u64)rx_ring->sbq_base_dma;
3138 base_indirect_ptr = rx_ring->sbq_base_indirect;
3139 page_entries = 0;
3140 do {
3141 *base_indirect_ptr = cpu_to_le64(tmp);
3142 tmp += DB_PAGE_SIZE;
3143 base_indirect_ptr++;
3144 page_entries++;
3145 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3146 cqicb->sbq_addr =
3147 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3148 cqicb->sbq_buf_size =
3149 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3150 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3151 (u16) rx_ring->sbq_len;
3152 cqicb->sbq_len = cpu_to_le16(bq_len);
3153 rx_ring->sbq_prod_idx = 0;
3154 rx_ring->sbq_curr_idx = 0;
3155 rx_ring->sbq_clean_idx = 0;
3156 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3157 }
3158 switch (rx_ring->type) {
3159 case TX_Q:
3160 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3161 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3162 break;
3163 case RX_Q:
3164 /* Inbound completion handling rx_rings run in
3165 * separate NAPI contexts.
3166 */
3167 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3168 64);
3169 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3170 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3171 break;
3172 default:
3173 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3175 }
3176 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3177 CFG_LCQ, rx_ring->cq_id);
3178 if (err) {
3179 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3180 return err;
3181 }
3182 return err;
3183 }
3184
3185 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3186 {
3187 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3188 void __iomem *doorbell_area =
3189 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3190 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3191 (tx_ring->wq_id * sizeof(u64));
3192 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3193 (tx_ring->wq_id * sizeof(u64));
3194 int err = 0;
3195
3196 /*
3197 * Assign doorbell registers for this tx_ring.
3198 */
3199 /* TX PCI doorbell mem area for tx producer index */
3200 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3201 tx_ring->prod_idx = 0;
3202 /* TX PCI doorbell mem area + 0x04 */
3203 tx_ring->valid_db_reg = doorbell_area + 0x04;
3204
3205 /*
3206 * Assign shadow registers for this tx_ring.
3207 */
3208 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3209 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3210
3211 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3212 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3213 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3214 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3215 wqicb->rid = 0;
3216 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3217
3218 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3219
3220 ql_init_tx_ring(qdev, tx_ring);
3221
3222 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3223 (u16) tx_ring->wq_id);
3224 if (err) {
3225 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3226 return err;
3227 }
3228 return err;
3229 }
3230
3231 static void ql_disable_msix(struct ql_adapter *qdev)
3232 {
3233 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3234 pci_disable_msix(qdev->pdev);
3235 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3236 kfree(qdev->msi_x_entry);
3237 qdev->msi_x_entry = NULL;
3238 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3239 pci_disable_msi(qdev->pdev);
3240 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3241 }
3242 }
3243
3244 /* We start by trying to get the number of vectors
3245 * stored in qdev->intr_count. If we don't get that
3246 * many then we reduce the count and try again.
3247 */
3248 static void ql_enable_msix(struct ql_adapter *qdev)
3249 {
3250 int i, err;
3251
3252 /* Get the MSIX vectors. */
3253 if (qlge_irq_type == MSIX_IRQ) {
3254 /* Try to alloc space for the msix struct,
3255 * if it fails then go to MSI/legacy.
3256 */
3257 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3258 sizeof(struct msix_entry),
3259 GFP_KERNEL);
3260 if (!qdev->msi_x_entry) {
3261 qlge_irq_type = MSI_IRQ;
3262 goto msi;
3263 }
3264
3265 for (i = 0; i < qdev->intr_count; i++)
3266 qdev->msi_x_entry[i].entry = i;
3267
3268 /* Loop to get our vectors. We start with
3269 * what we want and settle for what we get.
3270 */
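/* pci_enable_msix() returns the number of vectors actually available
 * when it can't grant the full request; retry with that smaller count
 * until it succeeds (0) or fails outright (< 0).
 */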
3271 do {
3272 err = pci_enable_msix(qdev->pdev,
3273 qdev->msi_x_entry, qdev->intr_count);
3274 if (err > 0)
3275 qdev->intr_count = err;
3276 } while (err > 0);
3277
3278 if (err < 0) {
3279 kfree(qdev->msi_x_entry);
3280 qdev->msi_x_entry = NULL;
3281 netif_warn(qdev, ifup, qdev->ndev,
3282 "MSI-X Enable failed, trying MSI.\n");
3283 qdev->intr_count = 1;
3284 qlge_irq_type = MSI_IRQ;
3285 } else if (err == 0) {
3286 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3287 netif_info(qdev, ifup, qdev->ndev,
3288 "MSI-X Enabled, got %d vectors.\n",
3289 qdev->intr_count);
3290 return;
3291 }
3292 }
3293 msi:
3294 qdev->intr_count = 1;
3295 if (qlge_irq_type == MSI_IRQ) {
3296 if (!pci_enable_msi(qdev->pdev)) {
3297 set_bit(QL_MSI_ENABLED, &qdev->flags);
3298 netif_info(qdev, ifup, qdev->ndev,
3299 "Running with MSI interrupts.\n");
3300 return;
3301 }
3302 }
3303 qlge_irq_type = LEG_IRQ;
3304 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3305 "Running with legacy interrupts.\n");
3306 }
3307
3308 /* Each vector services 1 RSS ring and 1 or more
3309 * TX completion rings. This function loops through
3310 * the TX completion rings and assigns the vector that
3311 * will service it. An example would be if there are
3312 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3313 * This would mean that vector 0 would service RSS ring 0
3314 * and TX completion rings 0,1,2 and 3. Vector 1 would
3315 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3316 */
3317 static void ql_set_tx_vect(struct ql_adapter *qdev)
3318 {
3319 int i, j, vect;
3320 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3321
3322 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3323 /* Assign irq vectors to TX rx_rings.*/
3324 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3325 i < qdev->rx_ring_count; i++) {
3326 if (j == tx_rings_per_vector) {
3327 vect++;
3328 j = 0;
3329 }
3330 qdev->rx_ring[i].irq = vect;
3331 j++;
3332 }
3333 } else {
3334 /* For single vector all rings have an irq
3335 * of zero.
3336 */
3337 for (i = 0; i < qdev->rx_ring_count; i++)
3338 qdev->rx_ring[i].irq = 0;
3339 }
3340 }
3341
3342 /* Set the interrupt mask for this vector. Each vector
3343 * will service 1 RSS ring and 1 or more TX completion
3344 * rings. This function sets up a bit mask per vector
3345 * that indicates which rings it services.
3346 */
3347 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3348 {
3349 int j, vect = ctx->intr;
3350 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3351
3352 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3353 /* Add the RSS ring serviced by this vector
3354 * to the mask.
3355 */
3356 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3357 /* Add the TX ring(s) serviced by this vector
3358 * to the mask. */
3359 for (j = 0; j < tx_rings_per_vector; j++) {
3360 ctx->irq_mask |=
3361 (1 << qdev->rx_ring[qdev->rss_ring_count +
3362 (vect * tx_rings_per_vector) + j].cq_id);
3363 }
3364 } else {
3365 /* For single vector we just shift each queue's
3366 * ID into the mask.
3367 */
3368 for (j = 0; j < qdev->rx_ring_count; j++)
3369 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3370 }
3371 }
3372
3373 /*
3374 * Here we build the intr_context structures based on
3375 * our rx_ring count and intr vector count.
3376 * The intr_context structure is used to hook each vector
3377 * to possibly different handlers.
3378 */
3379 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3380 {
3381 int i = 0;
3382 struct intr_context *intr_context = &qdev->intr_context[0];
3383
3384 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3385 /* Each rx_ring has its
3386 * own intr_context since we have separate
3387 * vectors for each queue.
3388 */
3389 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3390 qdev->rx_ring[i].irq = i;
3391 intr_context->intr = i;
3392 intr_context->qdev = qdev;
3393 /* Set up this vector's bit-mask that indicates
3394 * which queues it services.
3395 */
3396 ql_set_irq_mask(qdev, intr_context);
3397 /*
3398 * We set up each vector's enable/disable/read bits so
3399 * there's no bit/mask calculations in the critical path.
3400 */
3401 intr_context->intr_en_mask =
3402 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3403 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3404 | i;
3405 intr_context->intr_dis_mask =
3406 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3407 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3408 INTR_EN_IHD | i;
3409 intr_context->intr_read_mask =
3410 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3412 i;
3413 if (i == 0) {
3414 /* The first vector/queue handles
3415 * broadcast/multicast, fatal errors,
3416 * and firmware events. This in addition
3417 * to normal inbound NAPI processing.
3418 */
3419 intr_context->handler = qlge_isr;
3420 sprintf(intr_context->name, "%s-rx-%d",
3421 qdev->ndev->name, i);
3422 } else {
3423 /*
3424 * Inbound queues handle unicast frames only.
3425 */
3426 intr_context->handler = qlge_msix_rx_isr;
3427 sprintf(intr_context->name, "%s-rx-%d",
3428 qdev->ndev->name, i);
3429 }
3430 }
3431 } else {
3432 /*
3433 * All rx_rings use the same intr_context since
3434 * there is only one vector.
3435 */
3436 intr_context->intr = 0;
3437 intr_context->qdev = qdev;
3438 /*
3439 * We set up each vector's enable/disable/read bits so
3440 * there's no bit/mask calculations in the critical path.
3441 */
3442 intr_context->intr_en_mask =
3443 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3444 intr_context->intr_dis_mask =
3445 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3446 INTR_EN_TYPE_DISABLE;
3447 intr_context->intr_read_mask =
3448 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3449 /*
3450 * Single interrupt means one handler for all rings.
3451 */
3452 intr_context->handler = qlge_isr;
3453 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3454 /* Set up this vector's bit-mask that indicates
3455 * which queues it services. In this case there is
3456 * a single vector so it will service all RSS and
3457 * TX completion rings.
3458 */
3459 ql_set_irq_mask(qdev, intr_context);
3460 }
3461 /* Tell the TX completion rings which MSIx vector
3462 * they will be using.
3463 */
3464 ql_set_tx_vect(qdev);
3465 }
3466
3467 static void ql_free_irq(struct ql_adapter *qdev)
3468 {
3469 int i;
3470 struct intr_context *intr_context = &qdev->intr_context[0];
3471
3472 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3473 if (intr_context->hooked) {
3474 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3475 free_irq(qdev->msi_x_entry[i].vector,
3476 &qdev->rx_ring[i]);
3477 } else {
3478 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3479 }
3480 }
3481 }
3482 ql_disable_msix(qdev);
3483 }
3484
3485 static int ql_request_irq(struct ql_adapter *qdev)
3486 {
3487 int i;
3488 int status = 0;
3489 struct pci_dev *pdev = qdev->pdev;
3490 struct intr_context *intr_context = &qdev->intr_context[0];
3491
3492 ql_resolve_queues_to_irqs(qdev);
3493
3494 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3495 atomic_set(&intr_context->irq_cnt, 0);
3496 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3497 status = request_irq(qdev->msi_x_entry[i].vector,
3498 intr_context->handler,
3499 0,
3500 intr_context->name,
3501 &qdev->rx_ring[i]);
3502 if (status) {
3503 netif_err(qdev, ifup, qdev->ndev,
3504 "Failed request for MSIX interrupt %d.\n",
3505 i);
3506 goto err_irq;
3507 }
3508 } else {
3509 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3510 "trying msi or legacy interrupts.\n");
3511 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3512 "%s: irq = %d.\n", __func__, pdev->irq);
3513 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3514 "%s: context->name = %s.\n", __func__,
3515 intr_context->name);
3516 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 "%s: dev_id = 0x%p.\n", __func__,
3518 &qdev->rx_ring[0]);
3519 status =
3520 request_irq(pdev->irq, qlge_isr,
3521 test_bit(QL_MSI_ENABLED,
3522 &qdev->
3523 flags) ? 0 : IRQF_SHARED,
3524 intr_context->name, &qdev->rx_ring[0]);
3525 if (status)
3526 goto err_irq;
3527
3528 netif_err(qdev, ifup, qdev->ndev,
3529 "Hooked intr %d, queue type %s, with name %s.\n",
3530 i,
3531 qdev->rx_ring[0].type == DEFAULT_Q ?
3532 "DEFAULT_Q" :
3533 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3534 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3535 intr_context->name);
3536 }
3537 intr_context->hooked = 1;
3538 }
3539 return status;
3540 err_irq:
3541 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3542 ql_free_irq(qdev);
3543 return status;
3544 }
3545
3546 static int ql_start_rss(struct ql_adapter *qdev)
3547 {
3548 static const u8 init_hash_seed[] = {
3549 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3550 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3551 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3552 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3553 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3554 };
3555 struct ricb *ricb = &qdev->ricb;
3556 int status = 0;
3557 int i;
3558 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3559
3560 memset((void *)ricb, 0, sizeof(*ricb));
3561
3562 ricb->base_cq = RSS_L4K;
3563 ricb->flags =
3564 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3565 ricb->mask = cpu_to_le16((u16)(0x3ff));
3566
3567 /*
3568 * Fill out the Indirection Table.
3569 */
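/* Each of the 1024 entries maps a hash result to an RSS completion
 * queue; the mask spreads entries evenly only when rss_ring_count is a
 * power of two.
 */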
3570 for (i = 0; i < 1024; i++)
3571 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3572
3573 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3574 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3575
3576 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3577 if (status) {
3578 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3579 return status;
3580 }
3581 return status;
3582 }
3583
3584 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3585 {
3586 int i, status = 0;
3587
3588 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3589 if (status)
3590 return status;
3591 /* Clear all the entries in the routing table. */
3592 for (i = 0; i < 16; i++) {
3593 status = ql_set_routing_reg(qdev, i, 0, 0);
3594 if (status) {
3595 netif_err(qdev, ifup, qdev->ndev,
3596 "Failed to init routing register for CAM packets.\n");
3597 break;
3598 }
3599 }
3600 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3601 return status;
3602 }
3603
3604 /* Initialize the frame-to-queue routing. */
3605 static int ql_route_initialize(struct ql_adapter *qdev)
3606 {
3607 int status = 0;
3608
3609 /* Clear all the entries in the routing table. */
3610 status = ql_clear_routing_entries(qdev);
3611 if (status)
3612 return status;
3613
3614 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3615 if (status)
3616 return status;
3617
3618 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3619 RT_IDX_IP_CSUM_ERR, 1);
3620 if (status) {
3621 netif_err(qdev, ifup, qdev->ndev,
3622 "Failed to init routing register "
3623 "for IP CSUM error packets.\n");
3624 goto exit;
3625 }
3626 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3627 RT_IDX_TU_CSUM_ERR, 1);
3628 if (status) {
3629 netif_err(qdev, ifup, qdev->ndev,
3630 "Failed to init routing register "
3631 "for TCP/UDP CSUM error packets.\n");
3632 goto exit;
3633 }
3634 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3635 if (status) {
3636 netif_err(qdev, ifup, qdev->ndev,
3637 "Failed to init routing register for broadcast packets.\n");
3638 goto exit;
3639 }
3640 /* If we have more than one inbound queue, then turn on RSS in the
3641 * routing block.
3642 */
3643 if (qdev->rss_ring_count > 1) {
3644 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3645 RT_IDX_RSS_MATCH, 1);
3646 if (status) {
3647 netif_err(qdev, ifup, qdev->ndev,
3648 "Failed to init routing register for MATCH RSS packets.\n");
3649 goto exit;
3650 }
3651 }
3652
3653 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3654 RT_IDX_CAM_HIT, 1);
3655 if (status)
3656 netif_err(qdev, ifup, qdev->ndev,
3657 "Failed to init routing register for CAM packets.\n");
3658 exit:
3659 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3660 return status;
3661 }
3662
3663 int ql_cam_route_initialize(struct ql_adapter *qdev)
3664 {
3665 int status, set;
3666
3667 /* Check if the link is up and use that to
3668 * determine whether we are setting or clearing
3669 * the MAC address in the CAM.
3670 */
3671 set = ql_read32(qdev, STS);
3672 set &= qdev->port_link_up;
3673 status = ql_set_mac_addr(qdev, set);
3674 if (status) {
3675 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3676 return status;
3677 }
3678
3679 status = ql_route_initialize(qdev);
3680 if (status)
3681 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3682
3683 return status;
3684 }
3685
3686 static int ql_adapter_initialize(struct ql_adapter *qdev)
3687 {
3688 u32 value, mask;
3689 int i;
3690 int status = 0;
3691
3692 /*
3693 * Set up the System register to halt on errors.
3694 */
3695 value = SYS_EFE | SYS_FAE;
3696 mask = value << 16;
3697 ql_write32(qdev, SYS, mask | value);
3698
3699 /* Set the default queue, and VLAN behavior. */
3700 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3701 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3702 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3703
3704 /* Set the MPI interrupt to enabled. */
3705 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3706
3707 /* Enable the function, set pagesize, enable error checking. */
3708 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3709 FSC_EC | FSC_VM_PAGE_4K;
3710 value |= SPLT_SETTING;
3711
3712 /* Set/clear header splitting. */
3713 mask = FSC_VM_PAGESIZE_MASK |
3714 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3715 ql_write32(qdev, FSC, mask | value);
3716
3717 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3718
3719 /* Set RX packet routing to use the port/PCI function on which the
3720 * packet arrived, in addition to the usual frame routing.
3721 * This is helpful on bonding where both interfaces can have
3722 * the same MAC address.
3723 */
3724 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3725 /* Reroute all packets to our Interface.
3726 * They may have been routed to MPI firmware
3727 * due to WOL.
3728 */
3729 value = ql_read32(qdev, MGMT_RCV_CFG);
3730 value &= ~MGMT_RCV_CFG_RM;
3731 mask = 0xffff0000;
3732
3733 /* Sticky reg needs clearing due to WOL. */
3734 ql_write32(qdev, MGMT_RCV_CFG, mask);
3735 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3736
3737	/* Default WOL is enabled on Mezz cards. */
3738 if (qdev->pdev->subsystem_device == 0x0068 ||
3739 qdev->pdev->subsystem_device == 0x0180)
3740 qdev->wol = WAKE_MAGIC;
3741
3742 /* Start up the rx queues. */
3743 for (i = 0; i < qdev->rx_ring_count; i++) {
3744 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3745 if (status) {
3746 netif_err(qdev, ifup, qdev->ndev,
3747 "Failed to start rx ring[%d].\n", i);
3748 return status;
3749 }
3750 }
3751
3752 /* If there is more than one inbound completion queue
3753 * then download a RICB to configure RSS.
3754 */
3755 if (qdev->rss_ring_count > 1) {
3756 status = ql_start_rss(qdev);
3757 if (status) {
3758 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3759 return status;
3760 }
3761 }
3762
3763 /* Start up the tx queues. */
3764 for (i = 0; i < qdev->tx_ring_count; i++) {
3765 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3766 if (status) {
3767 netif_err(qdev, ifup, qdev->ndev,
3768 "Failed to start tx ring[%d].\n", i);
3769 return status;
3770 }
3771 }
3772
3773 /* Initialize the port and set the max framesize. */
3774 status = qdev->nic_ops->port_initialize(qdev);
3775 if (status)
3776 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3777
3778 /* Set up the MAC address and frame routing filter. */
3779 status = ql_cam_route_initialize(qdev);
3780 if (status) {
3781 netif_err(qdev, ifup, qdev->ndev,
3782 "Failed to init CAM/Routing tables.\n");
3783 return status;
3784 }
3785
3786 /* Start NAPI for the RSS queues. */
3787 for (i = 0; i < qdev->rss_ring_count; i++)
3788 napi_enable(&qdev->rx_ring[i].napi);
3789
3790 return status;
3791 }
3792
3793 /* Issue soft reset to chip. */
3794 static int ql_adapter_reset(struct ql_adapter *qdev)
3795 {
3796 u32 value;
3797 int status = 0;
3798 unsigned long end_jiffies;
3799
3800 /* Clear all the entries in the routing table. */
3801 status = ql_clear_routing_entries(qdev);
3802 if (status) {
3803 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3804 return status;
3805 }
3806
3807 end_jiffies = jiffies +
3808 max((unsigned long)1, usecs_to_jiffies(30));
3809
3810	/* If the recovery bit is set, skip the mailbox command and
3811	 * just clear the bit; otherwise we are in the normal reset process.
3812	 */
3813 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3814 /* Stop management traffic. */
3815 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3816
3817 /* Wait for the NIC and MGMNT FIFOs to empty. */
3818 ql_wait_fifo_empty(qdev);
3819 } else
3820 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3821
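	/* Assert the function reset bit; the hardware is expected to clear
	 * it when the reset completes, so poll for that below until
	 * end_jiffies.
	 */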
3822 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3823
3824 do {
3825 value = ql_read32(qdev, RST_FO);
3826 if ((value & RST_FO_FR) == 0)
3827 break;
3828 cpu_relax();
3829 } while (time_before(jiffies, end_jiffies));
3830
3831 if (value & RST_FO_FR) {
3832 netif_err(qdev, ifdown, qdev->ndev,
3833 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3834 status = -ETIMEDOUT;
3835 }
3836
3837 /* Resume management traffic. */
3838 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3839 return status;
3840 }
3841
3842 static void ql_display_dev_info(struct net_device *ndev)
3843 {
3844 struct ql_adapter *qdev = netdev_priv(ndev);
3845
3846 netif_info(qdev, probe, qdev->ndev,
3847 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3848 "XG Roll = %d, XG Rev = %d.\n",
3849 qdev->func,
3850 qdev->port,
3851 qdev->chip_rev_id & 0x0000000f,
3852 qdev->chip_rev_id >> 4 & 0x0000000f,
3853 qdev->chip_rev_id >> 8 & 0x0000000f,
3854 qdev->chip_rev_id >> 12 & 0x0000000f);
3855 netif_info(qdev, probe, qdev->ndev,
3856 "MAC address %pM\n", ndev->dev_addr);
3857 }
3858
3859 static int ql_wol(struct ql_adapter *qdev)
3860 {
3861 int status = 0;
3862 u32 wol = MB_WOL_DISABLE;
3863
3864 /* The CAM is still intact after a reset, but if we
3865 * are doing WOL, then we may need to program the
3866 * routing regs. We would also need to issue the mailbox
3867 * commands to instruct the MPI what to do per the ethtool
3868 * settings.
3869 */
3870
3871 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3872 WAKE_MCAST | WAKE_BCAST)) {
3873 netif_err(qdev, ifdown, qdev->ndev,
3874 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3875 qdev->wol);
3876 return -EINVAL;
3877 }
3878
3879 if (qdev->wol & WAKE_MAGIC) {
3880 status = ql_mb_wol_set_magic(qdev, 1);
3881 if (status) {
3882 netif_err(qdev, ifdown, qdev->ndev,
3883 "Failed to set magic packet on %s.\n",
3884 qdev->ndev->name);
3885 return status;
3886 } else
3887 netif_info(qdev, drv, qdev->ndev,
3888 "Enabled magic packet successfully on %s.\n",
3889 qdev->ndev->name);
3890
3891 wol |= MB_WOL_MAGIC_PKT;
3892 }
3893
3894 if (qdev->wol) {
3895 wol |= MB_WOL_MODE_ON;
3896 status = ql_mb_wol_mode(qdev, wol);
3897 netif_err(qdev, drv, qdev->ndev,
3898 "WOL %s (wol code 0x%x) on %s\n",
3899 (status == 0) ? "Successfully set" : "Failed",
3900 wol, qdev->ndev->name);
3901 }
3902
3903 return status;
3904 }
3905
3906 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3907 {
3908
3909 /* Don't kill the reset worker thread if we
3910 * are in the process of recovery.
3911 */
3912 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3913 cancel_delayed_work_sync(&qdev->asic_reset_work);
3914 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3915 cancel_delayed_work_sync(&qdev->mpi_work);
3916 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3917 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3918 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3919 }
3920
3921 static int ql_adapter_down(struct ql_adapter *qdev)
3922 {
3923 int i, status = 0;
3924
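	/* Teardown order: drop the link, cancel deferred work, quiesce NAPI,
	 * mask interrupts, drain the tx rings, then reset the chip and free
	 * the rx buffers.
	 */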
3925 ql_link_off(qdev);
3926
3927 ql_cancel_all_work_sync(qdev);
3928
3929 for (i = 0; i < qdev->rss_ring_count; i++)
3930 napi_disable(&qdev->rx_ring[i].napi);
3931
3932 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3933
3934 ql_disable_interrupts(qdev);
3935
3936 ql_tx_ring_clean(qdev);
3937
3938 /* Call netif_napi_del() from common point.
3939 */
3940 for (i = 0; i < qdev->rss_ring_count; i++)
3941 netif_napi_del(&qdev->rx_ring[i].napi);
3942
3943 status = ql_adapter_reset(qdev);
3944 if (status)
3945 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3946 qdev->func);
3947 ql_free_rx_buffers(qdev);
3948
3949 return status;
3950 }
3951
3952 static int ql_adapter_up(struct ql_adapter *qdev)
3953 {
3954 int err = 0;
3955
3956 err = ql_adapter_initialize(qdev);
3957 if (err) {
3958 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3959 goto err_init;
3960 }
3961 set_bit(QL_ADAPTER_UP, &qdev->flags);
3962 ql_alloc_rx_buffers(qdev);
3963	/* If the port is initialized and the
3964	 * link is up, then turn on the carrier.
3965	 */
3966 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3967 (ql_read32(qdev, STS) & qdev->port_link_up))
3968 ql_link_on(qdev);
3969 /* Restore rx mode. */
3970 clear_bit(QL_ALLMULTI, &qdev->flags);
3971 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3972 qlge_set_multicast_list(qdev->ndev);
3973
3974 /* Restore vlan setting. */
3975 qlge_restore_vlan(qdev);
3976
3977 ql_enable_interrupts(qdev);
3978 ql_enable_all_completion_interrupts(qdev);
3979 netif_tx_start_all_queues(qdev->ndev);
3980
3981 return 0;
3982 err_init:
3983 ql_adapter_reset(qdev);
3984 return err;
3985 }
3986
3987 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3988 {
3989 ql_free_mem_resources(qdev);
3990 ql_free_irq(qdev);
3991 }
3992
3993 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3994 {
3995 int status = 0;
3996
3997 if (ql_alloc_mem_resources(qdev)) {
3998 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3999 return -ENOMEM;
4000 }
4001 status = ql_request_irq(qdev);
4002 return status;
4003 }
4004
4005 static int qlge_close(struct net_device *ndev)
4006 {
4007 struct ql_adapter *qdev = netdev_priv(ndev);
4008
4009	/* If we hit the pci_channel_io_perm_failure
4010	 * condition, then we have already
4011	 * brought the adapter down.
4012	 */
4013 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4014 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4015 clear_bit(QL_EEH_FATAL, &qdev->flags);
4016 return 0;
4017 }
4018
4019 /*
4020 * Wait for device to recover from a reset.
4021 * (Rarely happens, but possible.)
4022 */
4023 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4024 msleep(1);
4025 ql_adapter_down(qdev);
4026 ql_release_adapter_resources(qdev);
4027 return 0;
4028 }
4029
4030 static int ql_configure_rings(struct ql_adapter *qdev)
4031 {
4032 int i;
4033 struct rx_ring *rx_ring;
4034 struct tx_ring *tx_ring;
4035 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4036 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4037 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4038
4039 qdev->lbq_buf_order = get_order(lbq_buf_len);
4040
4041	/* In a perfect world we have one RSS ring for each CPU
4042	 * and each has its own vector. To do that we ask for
4043	 * cpu_cnt vectors. ql_enable_msix() will adjust the
4044	 * vector count to what we actually get. We then
4045	 * allocate an RSS ring for each.
4046	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4047	 */
4048 qdev->intr_count = cpu_cnt;
4049 ql_enable_msix(qdev);
4050 /* Adjust the RSS ring count to the actual vector count. */
4051 qdev->rss_ring_count = qdev->intr_count;
4052 qdev->tx_ring_count = cpu_cnt;
4053 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
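	/* rx_ring[] holds the RSS (inbound) completion queues first, followed
	 * by one outbound completion queue per tx ring, so the tx completion
	 * cq_ids start at rss_ring_count (see below).
	 */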
4054
4055 for (i = 0; i < qdev->tx_ring_count; i++) {
4056 tx_ring = &qdev->tx_ring[i];
4057 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4058 tx_ring->qdev = qdev;
4059 tx_ring->wq_id = i;
4060 tx_ring->wq_len = qdev->tx_ring_size;
4061 tx_ring->wq_size =
4062 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4063
4064 /*
4065		 * The completion queue IDs for the tx rings start
4066 * immediately after the rss rings.
4067 */
4068 tx_ring->cq_id = qdev->rss_ring_count + i;
4069 }
4070
4071 for (i = 0; i < qdev->rx_ring_count; i++) {
4072 rx_ring = &qdev->rx_ring[i];
4073 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4074 rx_ring->qdev = qdev;
4075 rx_ring->cq_id = i;
4076 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4077 if (i < qdev->rss_ring_count) {
4078 /*
4079 * Inbound (RSS) queues.
4080 */
4081 rx_ring->cq_len = qdev->rx_ring_size;
4082 rx_ring->cq_size =
4083 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4084 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4085 rx_ring->lbq_size =
4086 rx_ring->lbq_len * sizeof(__le64);
4087 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4088 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4089 rx_ring->sbq_size =
4090 rx_ring->sbq_len * sizeof(__le64);
4091 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4092 rx_ring->type = RX_Q;
4093 } else {
4094 /*
4095 * Outbound queue handles outbound completions only.
4096 */
4097			/* The outbound cq is the same size as the tx_ring it services. */
4098 rx_ring->cq_len = qdev->tx_ring_size;
4099 rx_ring->cq_size =
4100 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4101 rx_ring->lbq_len = 0;
4102 rx_ring->lbq_size = 0;
4103 rx_ring->lbq_buf_size = 0;
4104 rx_ring->sbq_len = 0;
4105 rx_ring->sbq_size = 0;
4106 rx_ring->sbq_buf_size = 0;
4107 rx_ring->type = TX_Q;
4108 }
4109 }
4110 return 0;
4111 }
4112
4113 static int qlge_open(struct net_device *ndev)
4114 {
4115 int err = 0;
4116 struct ql_adapter *qdev = netdev_priv(ndev);
4117
4118 err = ql_adapter_reset(qdev);
4119 if (err)
4120 return err;
4121
4122 err = ql_configure_rings(qdev);
4123 if (err)
4124 return err;
4125
4126 err = ql_get_adapter_resources(qdev);
4127 if (err)
4128 goto error_up;
4129
4130 err = ql_adapter_up(qdev);
4131 if (err)
4132 goto error_up;
4133
4134 return err;
4135
4136 error_up:
4137 ql_release_adapter_resources(qdev);
4138 return err;
4139 }
4140
4141 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4142 {
4143 struct rx_ring *rx_ring;
4144 int i, status;
4145 u32 lbq_buf_len;
4146
4147 /* Wait for an outstanding reset to complete. */
4148 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4149 int i = 3;
4150 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4151 netif_err(qdev, ifup, qdev->ndev,
4152 "Waiting for adapter UP...\n");
4153 ssleep(1);
4154 }
4155
4156 if (!i) {
4157 netif_err(qdev, ifup, qdev->ndev,
4158 "Timed out waiting for adapter UP\n");
4159 return -ETIMEDOUT;
4160 }
4161 }
4162
4163 status = ql_adapter_down(qdev);
4164 if (status)
4165 goto error;
4166
4167 /* Get the new rx buffer size. */
4168 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4169 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4170 qdev->lbq_buf_order = get_order(lbq_buf_len);
4171
4172 for (i = 0; i < qdev->rss_ring_count; i++) {
4173 rx_ring = &qdev->rx_ring[i];
4174 /* Set the new size. */
4175 rx_ring->lbq_buf_size = lbq_buf_len;
4176 }
4177
4178 status = ql_adapter_up(qdev);
4179 if (status)
4180 goto error;
4181
4182 return status;
4183 error:
4184 netif_alert(qdev, ifup, qdev->ndev,
4185 "Driver up/down cycle failed, closing device.\n");
4186 set_bit(QL_ADAPTER_UP, &qdev->flags);
4187 dev_close(qdev->ndev);
4188 return status;
4189 }
4190
4191 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4192 {
4193 struct ql_adapter *qdev = netdev_priv(ndev);
4194 int status;
4195
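	/* Only transitions between the standard MTU (1500) and the jumbo
	 * MTU (9000) are supported; anything else is rejected with -EINVAL.
	 */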
4196 if (ndev->mtu == 1500 && new_mtu == 9000) {
4197 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4198 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4199 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4200 } else
4201 return -EINVAL;
4202
4203 queue_delayed_work(qdev->workqueue,
4204 &qdev->mpi_port_cfg_work, 3*HZ);
4205
4206 ndev->mtu = new_mtu;
4207
4208 if (!netif_running(qdev->ndev)) {
4209 return 0;
4210 }
4211
4212 status = ql_change_rx_buffers(qdev);
4213 if (status) {
4214 netif_err(qdev, ifup, qdev->ndev,
4215 "Changing MTU failed.\n");
4216 }
4217
4218 return status;
4219 }
4220
4221 static struct net_device_stats *qlge_get_stats(struct net_device
4222 *ndev)
4223 {
4224 struct ql_adapter *qdev = netdev_priv(ndev);
4225 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4226 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4227 unsigned long pkts, mcast, dropped, errors, bytes;
4228 int i;
4229
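	/* Aggregate the per-ring software counters into ndev->stats so a
	 * single read reports totals across all rx and tx rings.
	 */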
4230 /* Get RX stats. */
4231 pkts = mcast = dropped = errors = bytes = 0;
4232 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4233 pkts += rx_ring->rx_packets;
4234 bytes += rx_ring->rx_bytes;
4235 dropped += rx_ring->rx_dropped;
4236 errors += rx_ring->rx_errors;
4237 mcast += rx_ring->rx_multicast;
4238 }
4239 ndev->stats.rx_packets = pkts;
4240 ndev->stats.rx_bytes = bytes;
4241 ndev->stats.rx_dropped = dropped;
4242 ndev->stats.rx_errors = errors;
4243 ndev->stats.multicast = mcast;
4244
4245 /* Get TX stats. */
4246 pkts = errors = bytes = 0;
4247 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4248 pkts += tx_ring->tx_packets;
4249 bytes += tx_ring->tx_bytes;
4250 errors += tx_ring->tx_errors;
4251 }
4252 ndev->stats.tx_packets = pkts;
4253 ndev->stats.tx_bytes = bytes;
4254 ndev->stats.tx_errors = errors;
4255 return &ndev->stats;
4256 }
4257
4258 static void qlge_set_multicast_list(struct net_device *ndev)
4259 {
4260 struct ql_adapter *qdev = netdev_priv(ndev);
4261 struct netdev_hw_addr *ha;
4262 int i, status;
4263
4264 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4265 if (status)
4266 return;
4267 /*
4268 * Set or clear promiscuous mode if a
4269 * transition is taking place.
4270 */
4271 if (ndev->flags & IFF_PROMISC) {
4272 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4273 if (ql_set_routing_reg
4274 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4275 netif_err(qdev, hw, qdev->ndev,
4276 "Failed to set promiscuous mode.\n");
4277 } else {
4278 set_bit(QL_PROMISCUOUS, &qdev->flags);
4279 }
4280 }
4281 } else {
4282 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4283 if (ql_set_routing_reg
4284 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4285 netif_err(qdev, hw, qdev->ndev,
4286 "Failed to clear promiscuous mode.\n");
4287 } else {
4288 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4289 }
4290 }
4291 }
4292
4293 /*
4294 * Set or clear all multicast mode if a
4295 * transition is taking place.
4296 */
4297 if ((ndev->flags & IFF_ALLMULTI) ||
4298 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4299 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4300 if (ql_set_routing_reg
4301 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4302 netif_err(qdev, hw, qdev->ndev,
4303 "Failed to set all-multi mode.\n");
4304 } else {
4305 set_bit(QL_ALLMULTI, &qdev->flags);
4306 }
4307 }
4308 } else {
4309 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4310 if (ql_set_routing_reg
4311 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4312 netif_err(qdev, hw, qdev->ndev,
4313 "Failed to clear all-multi mode.\n");
4314 } else {
4315 clear_bit(QL_ALLMULTI, &qdev->flags);
4316 }
4317 }
4318 }
4319
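	/* Program each multicast address into a MAC filter slot, then enable
	 * multicast-match routing so those frames are steered to this
	 * function.
	 */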
4320 if (!netdev_mc_empty(ndev)) {
4321 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4322 if (status)
4323 goto exit;
4324 i = 0;
4325 netdev_for_each_mc_addr(ha, ndev) {
4326 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4327 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4328 netif_err(qdev, hw, qdev->ndev,
4329				"Failed to load multicast address.\n");
4330 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4331 goto exit;
4332 }
4333 i++;
4334 }
4335 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4336 if (ql_set_routing_reg
4337 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4338 netif_err(qdev, hw, qdev->ndev,
4339 "Failed to set multicast match mode.\n");
4340 } else {
4341 set_bit(QL_ALLMULTI, &qdev->flags);
4342 }
4343 }
4344 exit:
4345 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4346 }
4347
4348 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4349 {
4350 struct ql_adapter *qdev = netdev_priv(ndev);
4351 struct sockaddr *addr = p;
4352 int status;
4353
4354 if (!is_valid_ether_addr(addr->sa_data))
4355 return -EADDRNOTAVAIL;
4356 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4357 /* Update local copy of current mac address. */
4358 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4359
4360 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4361 if (status)
4362 return status;
4363 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4364 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4365 if (status)
4366 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4367 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4368 return status;
4369 }
4370
4371 static void qlge_tx_timeout(struct net_device *ndev)
4372 {
4373 struct ql_adapter *qdev = netdev_priv(ndev);
4374 ql_queue_asic_error(qdev);
4375 }
4376
4377 static void ql_asic_reset_work(struct work_struct *work)
4378 {
4379 struct ql_adapter *qdev =
4380 container_of(work, struct ql_adapter, asic_reset_work.work);
4381 int status;
4382 rtnl_lock();
4383 status = ql_adapter_down(qdev);
4384 if (status)
4385 goto error;
4386
4387 status = ql_adapter_up(qdev);
4388 if (status)
4389 goto error;
4390
4391 /* Restore rx mode. */
4392 clear_bit(QL_ALLMULTI, &qdev->flags);
4393 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4394 qlge_set_multicast_list(qdev->ndev);
4395
4396 rtnl_unlock();
4397 return;
4398 error:
4399 netif_alert(qdev, ifup, qdev->ndev,
4400 "Driver up/down cycle failed, closing device\n");
4401
4402 set_bit(QL_ADAPTER_UP, &qdev->flags);
4403 dev_close(qdev->ndev);
4404 rtnl_unlock();
4405 }
4406
4407 static const struct nic_operations qla8012_nic_ops = {
4408 .get_flash = ql_get_8012_flash_params,
4409 .port_initialize = ql_8012_port_initialize,
4410 };
4411
4412 static const struct nic_operations qla8000_nic_ops = {
4413 .get_flash = ql_get_8000_flash_params,
4414 .port_initialize = ql_8000_port_initialize,
4415 };
4416
4417 /* Find the pcie function number for the other NIC
4418 * on this chip. Since both NIC functions share a
4419 * common firmware we have the lowest enabled function
4420 * do any common work. Examples would be resetting
4421 * after a fatal firmware error, or doing a firmware
4422 * coredump.
4423 */
4424 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4425 {
4426 int status = 0;
4427 u32 temp;
4428 u32 nic_func1, nic_func2;
4429
4430 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4431 &temp);
4432 if (status)
4433 return status;
4434
4435 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4436 MPI_TEST_NIC_FUNC_MASK);
4437 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4438 MPI_TEST_NIC_FUNC_MASK);
4439
4440 if (qdev->func == nic_func1)
4441 qdev->alt_func = nic_func2;
4442 else if (qdev->func == nic_func2)
4443 qdev->alt_func = nic_func1;
4444 else
4445 status = -EIO;
4446
4447 return status;
4448 }
4449
4450 static int ql_get_board_info(struct ql_adapter *qdev)
4451 {
4452 int status;
4453 qdev->func =
4454 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4455 if (qdev->func > 3)
4456 return -EIO;
4457
4458 status = ql_get_alt_pcie_func(qdev);
4459 if (status)
4460 return status;
4461
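	/* The NIC function with the lower PCIe function number is port 0,
	 * the other is port 1.
	 */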
4462 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4463 if (qdev->port) {
4464 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4465 qdev->port_link_up = STS_PL1;
4466 qdev->port_init = STS_PI1;
4467 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4468 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4469 } else {
4470 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4471 qdev->port_link_up = STS_PL0;
4472 qdev->port_init = STS_PI0;
4473 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4474 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4475 }
4476 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4477 qdev->device_id = qdev->pdev->device;
4478 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4479 qdev->nic_ops = &qla8012_nic_ops;
4480 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4481 qdev->nic_ops = &qla8000_nic_ops;
4482 return status;
4483 }
4484
4485 static void ql_release_all(struct pci_dev *pdev)
4486 {
4487 struct net_device *ndev = pci_get_drvdata(pdev);
4488 struct ql_adapter *qdev = netdev_priv(ndev);
4489
4490 if (qdev->workqueue) {
4491 destroy_workqueue(qdev->workqueue);
4492 qdev->workqueue = NULL;
4493 }
4494
4495 if (qdev->reg_base)
4496 iounmap(qdev->reg_base);
4497 if (qdev->doorbell_area)
4498 iounmap(qdev->doorbell_area);
4499 vfree(qdev->mpi_coredump);
4500 pci_release_regions(pdev);
4501 pci_set_drvdata(pdev, NULL);
4502 }
4503
4504 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4505 int cards_found)
4506 {
4507 struct ql_adapter *qdev = netdev_priv(ndev);
4508 int err = 0;
4509
4510 memset((void *)qdev, 0, sizeof(*qdev));
4511 err = pci_enable_device(pdev);
4512 if (err) {
4513 dev_err(&pdev->dev, "PCI device enable failed.\n");
4514 return err;
4515 }
4516
4517 qdev->ndev = ndev;
4518 qdev->pdev = pdev;
4519 pci_set_drvdata(pdev, ndev);
4520
4521 /* Set PCIe read request size */
4522 err = pcie_set_readrq(pdev, 4096);
4523 if (err) {
4524 dev_err(&pdev->dev, "Set readrq failed.\n");
4525 goto err_out1;
4526 }
4527
4528 err = pci_request_regions(pdev, DRV_NAME);
4529 if (err) {
4530 dev_err(&pdev->dev, "PCI region request failed.\n");
4531		goto err_out1;
4532 }
4533
4534 pci_set_master(pdev);
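	/* Prefer a 64-bit DMA mask and remember it via QL_DMA64 (used later
	 * to advertise NETIF_F_HIGHDMA); otherwise fall back to 32-bit.
	 */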
4535 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4536 set_bit(QL_DMA64, &qdev->flags);
4537 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4538 } else {
4539 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4540 if (!err)
4541 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4542 }
4543
4544 if (err) {
4545 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4546 goto err_out2;
4547 }
4548
4549 /* Set PCIe reset type for EEH to fundamental. */
4550 pdev->needs_freset = 1;
4551 pci_save_state(pdev);
4552 qdev->reg_base =
4553 ioremap_nocache(pci_resource_start(pdev, 1),
4554 pci_resource_len(pdev, 1));
4555 if (!qdev->reg_base) {
4556 dev_err(&pdev->dev, "Register mapping failed.\n");
4557 err = -ENOMEM;
4558 goto err_out2;
4559 }
4560
4561 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4562 qdev->doorbell_area =
4563 ioremap_nocache(pci_resource_start(pdev, 3),
4564 pci_resource_len(pdev, 3));
4565 if (!qdev->doorbell_area) {
4566 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4567 err = -ENOMEM;
4568 goto err_out2;
4569 }
4570
4571 err = ql_get_board_info(qdev);
4572 if (err) {
4573 dev_err(&pdev->dev, "Register access failed.\n");
4574 err = -EIO;
4575 goto err_out2;
4576 }
4577 qdev->msg_enable = netif_msg_init(debug, default_msg);
4578 spin_lock_init(&qdev->hw_lock);
4579 spin_lock_init(&qdev->stats_lock);
4580
4581 if (qlge_mpi_coredump) {
4582 qdev->mpi_coredump =
4583 vmalloc(sizeof(struct ql_mpi_coredump));
4584 if (qdev->mpi_coredump == NULL) {
4585 err = -ENOMEM;
4586 goto err_out2;
4587 }
4588 if (qlge_force_coredump)
4589 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4590 }
4591 /* make sure the EEPROM is good */
4592 err = qdev->nic_ops->get_flash(qdev);
4593 if (err) {
4594 dev_err(&pdev->dev, "Invalid FLASH.\n");
4595 goto err_out2;
4596 }
4597
4598 /* Keep local copy of current mac address. */
4599 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4600
4601 /* Set up the default ring sizes. */
4602 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4603 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4604
4605 /* Set up the coalescing parameters. */
4606 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4607 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4608 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4609 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4610
4611 /*
4612 * Set up the operating parameters.
4613 */
4614 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4615 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4616 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4617 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4618 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4619 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4620 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4621 init_completion(&qdev->ide_completion);
4622 mutex_init(&qdev->mpi_mutex);
4623
4624 if (!cards_found) {
4625 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4626 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4627 DRV_NAME, DRV_VERSION);
4628 }
4629 return 0;
4630 err_out2:
4631 ql_release_all(pdev);
4632 err_out1:
4633 pci_disable_device(pdev);
4634 return err;
4635 }
4636
4637 static const struct net_device_ops qlge_netdev_ops = {
4638 .ndo_open = qlge_open,
4639 .ndo_stop = qlge_close,
4640 .ndo_start_xmit = qlge_send,
4641 .ndo_change_mtu = qlge_change_mtu,
4642 .ndo_get_stats = qlge_get_stats,
4643 .ndo_set_rx_mode = qlge_set_multicast_list,
4644 .ndo_set_mac_address = qlge_set_mac_address,
4645 .ndo_validate_addr = eth_validate_addr,
4646 .ndo_tx_timeout = qlge_tx_timeout,
4647 .ndo_fix_features = qlge_fix_features,
4648 .ndo_set_features = qlge_set_features,
4649 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4650 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4651 };
4652
4653 static void ql_timer(unsigned long data)
4654 {
4655 struct ql_adapter *qdev = (struct ql_adapter *)data;
4656 u32 var = 0;
4657
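	/* Periodic dummy register read: if the PCI channel has gone offline
	 * (EEH), log the status read and stop re-arming the timer.
	 */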
4658 var = ql_read32(qdev, STS);
4659 if (pci_channel_offline(qdev->pdev)) {
4660 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4661 return;
4662 }
4663
4664 mod_timer(&qdev->timer, jiffies + (5*HZ));
4665 }
4666
4667 static int qlge_probe(struct pci_dev *pdev,
4668 const struct pci_device_id *pci_entry)
4669 {
4670 struct net_device *ndev = NULL;
4671 struct ql_adapter *qdev = NULL;
4672 static int cards_found = 0;
4673 int err = 0;
4674
4675 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4676 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4677 if (!ndev)
4678 return -ENOMEM;
4679
4680 err = ql_init_device(pdev, ndev, cards_found);
4681 if (err < 0) {
4682 free_netdev(ndev);
4683 return err;
4684 }
4685
4686 qdev = netdev_priv(ndev);
4687 SET_NETDEV_DEV(ndev, &pdev->dev);
4688 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4689 NETIF_F_TSO | NETIF_F_TSO_ECN |
4690 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
4691 ndev->features = ndev->hw_features |
4692 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4693 ndev->vlan_features = ndev->hw_features;
4694
4695 if (test_bit(QL_DMA64, &qdev->flags))
4696 ndev->features |= NETIF_F_HIGHDMA;
4697
4698 /*
4699 * Set up net_device structure.
4700 */
4701 ndev->tx_queue_len = qdev->tx_ring_size;
4702 ndev->irq = pdev->irq;
4703
4704 ndev->netdev_ops = &qlge_netdev_ops;
4705 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4706 ndev->watchdog_timeo = 10 * HZ;
4707
4708 err = register_netdev(ndev);
4709 if (err) {
4710 dev_err(&pdev->dev, "net device registration failed.\n");
4711 ql_release_all(pdev);
4712 pci_disable_device(pdev);
4713 return err;
4714 }
4715 /* Start up the timer to trigger EEH if
4716 * the bus goes dead
4717 */
4718 init_timer_deferrable(&qdev->timer);
4719 qdev->timer.data = (unsigned long)qdev;
4720 qdev->timer.function = ql_timer;
4721 qdev->timer.expires = jiffies + (5*HZ);
4722 add_timer(&qdev->timer);
4723 ql_link_off(qdev);
4724 ql_display_dev_info(ndev);
4725 atomic_set(&qdev->lb_count, 0);
4726 cards_found++;
4727 return 0;
4728 }
4729
4730 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4731 {
4732 return qlge_send(skb, ndev);
4733 }
4734
4735 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4736 {
4737 return ql_clean_inbound_rx_ring(rx_ring, budget);
4738 }
4739
4740 static void qlge_remove(struct pci_dev *pdev)
4741 {
4742 struct net_device *ndev = pci_get_drvdata(pdev);
4743 struct ql_adapter *qdev = netdev_priv(ndev);
4744 del_timer_sync(&qdev->timer);
4745 ql_cancel_all_work_sync(qdev);
4746 unregister_netdev(ndev);
4747 ql_release_all(pdev);
4748 pci_disable_device(pdev);
4749 free_netdev(ndev);
4750 }
4751
4752 /* Clean up resources without touching hardware. */
4753 static void ql_eeh_close(struct net_device *ndev)
4754 {
4755 int i;
4756 struct ql_adapter *qdev = netdev_priv(ndev);
4757
4758 if (netif_carrier_ok(ndev)) {
4759 netif_carrier_off(ndev);
4760 netif_stop_queue(ndev);
4761 }
4762
4763 /* Disabling the timer */
4764 del_timer_sync(&qdev->timer);
4765 ql_cancel_all_work_sync(qdev);
4766
4767 for (i = 0; i < qdev->rss_ring_count; i++)
4768 netif_napi_del(&qdev->rx_ring[i].napi);
4769
4770 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4771 ql_tx_ring_clean(qdev);
4772 ql_free_rx_buffers(qdev);
4773 ql_release_adapter_resources(qdev);
4774 }
4775
4776 /*
4777 * This callback is called by the PCI subsystem whenever
4778 * a PCI bus error is detected.
4779 */
4780 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4781 enum pci_channel_state state)
4782 {
4783 struct net_device *ndev = pci_get_drvdata(pdev);
4784 struct ql_adapter *qdev = netdev_priv(ndev);
4785
4786 switch (state) {
4787 case pci_channel_io_normal:
4788 return PCI_ERS_RESULT_CAN_RECOVER;
4789 case pci_channel_io_frozen:
4790 netif_device_detach(ndev);
4791 if (netif_running(ndev))
4792 ql_eeh_close(ndev);
4793 pci_disable_device(pdev);
4794 return PCI_ERS_RESULT_NEED_RESET;
4795 case pci_channel_io_perm_failure:
4796 dev_err(&pdev->dev,
4797 "%s: pci_channel_io_perm_failure.\n", __func__);
4798 ql_eeh_close(ndev);
4799 set_bit(QL_EEH_FATAL, &qdev->flags);
4800 return PCI_ERS_RESULT_DISCONNECT;
4801 }
4802
4803 /* Request a slot reset. */
4804 return PCI_ERS_RESULT_NEED_RESET;
4805 }
4806
4807 /*
4808 * This callback is called after the PCI bus has been reset.
4809 * Basically, this tries to restart the card from scratch.
4810 * This is a shortened version of the device probe/discovery code,
4811 * it resembles the first half of the () routine.
4812 */
4813 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4814 {
4815 struct net_device *ndev = pci_get_drvdata(pdev);
4816 struct ql_adapter *qdev = netdev_priv(ndev);
4817
4818 pdev->error_state = pci_channel_io_normal;
4819
4820 pci_restore_state(pdev);
4821 if (pci_enable_device(pdev)) {
4822 netif_err(qdev, ifup, qdev->ndev,
4823 "Cannot re-enable PCI device after reset.\n");
4824 return PCI_ERS_RESULT_DISCONNECT;
4825 }
4826 pci_set_master(pdev);
4827
4828 if (ql_adapter_reset(qdev)) {
4829 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4830 set_bit(QL_EEH_FATAL, &qdev->flags);
4831 return PCI_ERS_RESULT_DISCONNECT;
4832 }
4833
4834 return PCI_ERS_RESULT_RECOVERED;
4835 }
4836
4837 static void qlge_io_resume(struct pci_dev *pdev)
4838 {
4839 struct net_device *ndev = pci_get_drvdata(pdev);
4840 struct ql_adapter *qdev = netdev_priv(ndev);
4841 int err = 0;
4842
4843 if (netif_running(ndev)) {
4844 err = qlge_open(ndev);
4845 if (err) {
4846 netif_err(qdev, ifup, qdev->ndev,
4847 "Device initialization failed after reset.\n");
4848 return;
4849 }
4850 } else {
4851 netif_err(qdev, ifup, qdev->ndev,
4852 "Device was not running prior to EEH.\n");
4853 }
4854 mod_timer(&qdev->timer, jiffies + (5*HZ));
4855 netif_device_attach(ndev);
4856 }
4857
4858 static const struct pci_error_handlers qlge_err_handler = {
4859 .error_detected = qlge_io_error_detected,
4860 .slot_reset = qlge_io_slot_reset,
4861 .resume = qlge_io_resume,
4862 };
4863
4864 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4865 {
4866 struct net_device *ndev = pci_get_drvdata(pdev);
4867 struct ql_adapter *qdev = netdev_priv(ndev);
4868 int err;
4869
4870 netif_device_detach(ndev);
4871 del_timer_sync(&qdev->timer);
4872
4873 if (netif_running(ndev)) {
4874 err = ql_adapter_down(qdev);
4875		if (err)
4876 return err;
4877 }
4878
4879 ql_wol(qdev);
4880 err = pci_save_state(pdev);
4881 if (err)
4882 return err;
4883
4884 pci_disable_device(pdev);
4885
4886 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4887
4888 return 0;
4889 }
4890
4891 #ifdef CONFIG_PM
4892 static int qlge_resume(struct pci_dev *pdev)
4893 {
4894 struct net_device *ndev = pci_get_drvdata(pdev);
4895 struct ql_adapter *qdev = netdev_priv(ndev);
4896 int err;
4897
4898 pci_set_power_state(pdev, PCI_D0);
4899 pci_restore_state(pdev);
4900 err = pci_enable_device(pdev);
4901 if (err) {
4902 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4903 return err;
4904 }
4905 pci_set_master(pdev);
4906
4907 pci_enable_wake(pdev, PCI_D3hot, 0);
4908 pci_enable_wake(pdev, PCI_D3cold, 0);
4909
4910 if (netif_running(ndev)) {
4911 err = ql_adapter_up(qdev);
4912 if (err)
4913 return err;
4914 }
4915
4916 mod_timer(&qdev->timer, jiffies + (5*HZ));
4917 netif_device_attach(ndev);
4918
4919 return 0;
4920 }
4921 #endif /* CONFIG_PM */
4922
4923 static void qlge_shutdown(struct pci_dev *pdev)
4924 {
4925 qlge_suspend(pdev, PMSG_SUSPEND);
4926 }
4927
4928 static struct pci_driver qlge_driver = {
4929 .name = DRV_NAME,
4930 .id_table = qlge_pci_tbl,
4931 .probe = qlge_probe,
4932 .remove = qlge_remove,
4933 #ifdef CONFIG_PM
4934 .suspend = qlge_suspend,
4935 .resume = qlge_resume,
4936 #endif
4937 .shutdown = qlge_shutdown,
4938 .err_handler = &qlge_err_handler
4939 };
4940
4941 static int __init qlge_init_module(void)
4942 {
4943 return pci_register_driver(&qlge_driver);
4944 }
4945
4946 static void __exit qlge_exit(void)
4947 {
4948 pci_unregister_driver(&qlge_driver);
4949 }
4950
4951 module_init(qlge_init_module);
4952 module_exit(qlge_exit);