drivers/net/ethernet/intel/igbvf/netdev.c
1 /*******************************************************************************
2
3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 - 2010 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/init.h>
33 #include <linux/pci.h>
34 #include <linux/vmalloc.h>
35 #include <linux/pagemap.h>
36 #include <linux/delay.h>
37 #include <linux/netdevice.h>
38 #include <linux/tcp.h>
39 #include <linux/ipv6.h>
40 #include <linux/slab.h>
41 #include <net/checksum.h>
42 #include <net/ip6_checksum.h>
43 #include <linux/mii.h>
44 #include <linux/ethtool.h>
45 #include <linux/if_vlan.h>
46 #include <linux/prefetch.h>
47
48 #include "igbvf.h"
49
50 #define DRV_VERSION "2.0.1-k"
51 char igbvf_driver_name[] = "igbvf";
52 const char igbvf_driver_version[] = DRV_VERSION;
53 static const char igbvf_driver_string[] =
54 "Intel(R) Gigabit Virtual Function Network Driver";
55 static const char igbvf_copyright[] =
56 "Copyright (c) 2009 - 2011 Intel Corporation.";
57
58 static int igbvf_poll(struct napi_struct *napi, int budget);
59 static void igbvf_reset(struct igbvf_adapter *);
60 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
61 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
62
63 static struct igbvf_info igbvf_vf_info = {
64 .mac = e1000_vfadapt,
65 .flags = 0,
66 .pba = 10,
67 .init_ops = e1000_init_function_pointers_vf,
68 };
69
70 static struct igbvf_info igbvf_i350_vf_info = {
71 .mac = e1000_vfadapt_i350,
72 .flags = 0,
73 .pba = 10,
74 .init_ops = e1000_init_function_pointers_vf,
75 };
76
77 static const struct igbvf_info *igbvf_info_tbl[] = {
78 [board_vf] = &igbvf_vf_info,
79 [board_i350_vf] = &igbvf_i350_vf_info,
80 };
81
82 /**
83 * igbvf_desc_unused - calculate if we have unused descriptors
84 **/
85 static int igbvf_desc_unused(struct igbvf_ring *ring)
86 {
87 if (ring->next_to_clean > ring->next_to_use)
88 return ring->next_to_clean - ring->next_to_use - 1;
89
90 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
91 }
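/* For illustration: with a 256-entry ring, next_to_clean = 10 and
 * next_to_use = 250 gives 256 + 10 - 250 - 1 = 15 unused descriptors.
 * One slot is always left unused so that next_to_clean == next_to_use
 * can unambiguously mean "empty" rather than "full". */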
92
93 /**
94 * igbvf_receive_skb - helper function to handle Rx indications
 95 * @adapter: board private structure
 * @netdev: network interface device structure
96 * @status: descriptor status field as written by hardware
97 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
98 * @skb: pointer to sk_buff to be indicated to stack
99 **/
100 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
101 struct net_device *netdev,
102 struct sk_buff *skb,
103 u32 status, u16 vlan)
104 {
105 if (status & E1000_RXD_STAT_VP) {
106 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
107 if (test_bit(vid, adapter->active_vlans))
108 __vlan_hwaccel_put_tag(skb, vid);
109 }
110 netif_receive_skb(skb);
111 }
112
113 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
114 u32 status_err, struct sk_buff *skb)
115 {
116 skb_checksum_none_assert(skb);
117
 118 /* skip if the Ignore Checksum bit is set or checksum was disabled through ethtool */
119 if ((status_err & E1000_RXD_STAT_IXSM) ||
120 (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
121 return;
122
123 /* TCP/UDP checksum error bit is set */
124 if (status_err &
125 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
126 /* let the stack verify checksum errors */
127 adapter->hw_csum_err++;
128 return;
129 }
130
131 /* It must be a TCP or UDP packet with a valid checksum */
132 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
133 skb->ip_summed = CHECKSUM_UNNECESSARY;
134
135 adapter->hw_csum_good++;
136 }
137
138 /**
139 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
140 * @rx_ring: address of ring structure to repopulate
141 * @cleaned_count: number of buffers to repopulate
142 **/
143 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
144 int cleaned_count)
145 {
146 struct igbvf_adapter *adapter = rx_ring->adapter;
147 struct net_device *netdev = adapter->netdev;
148 struct pci_dev *pdev = adapter->pdev;
149 union e1000_adv_rx_desc *rx_desc;
150 struct igbvf_buffer *buffer_info;
151 struct sk_buff *skb;
152 unsigned int i;
153 int bufsz;
154
155 i = rx_ring->next_to_use;
156 buffer_info = &rx_ring->buffer_info[i];
157
158 if (adapter->rx_ps_hdr_size)
159 bufsz = adapter->rx_ps_hdr_size;
160 else
161 bufsz = adapter->rx_buffer_len;
162
163 while (cleaned_count--) {
164 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
165
166 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
167 if (!buffer_info->page) {
168 buffer_info->page = alloc_page(GFP_ATOMIC);
169 if (!buffer_info->page) {
170 adapter->alloc_rx_buff_failed++;
171 goto no_buffers;
172 }
173 buffer_info->page_offset = 0;
174 } else {
175 buffer_info->page_offset ^= PAGE_SIZE / 2;
176 }
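/* The XOR above flips between the two halves of the same page, so each
 * page backs two half-page Rx buffers; the page_count()/get_page()
 * logic in igbvf_clean_rx_irq() decides when a page may be reused. */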
177 buffer_info->page_dma =
178 dma_map_page(&pdev->dev, buffer_info->page,
179 buffer_info->page_offset,
180 PAGE_SIZE / 2,
181 DMA_FROM_DEVICE);
182 }
183
184 if (!buffer_info->skb) {
185 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
186 if (!skb) {
187 adapter->alloc_rx_buff_failed++;
188 goto no_buffers;
189 }
190
191 buffer_info->skb = skb;
192 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
193 bufsz,
194 DMA_FROM_DEVICE);
195 }
196 /* Refresh the desc even if buffer_addrs didn't change because
197 * each write-back erases this info. */
198 if (adapter->rx_ps_hdr_size) {
199 rx_desc->read.pkt_addr =
200 cpu_to_le64(buffer_info->page_dma);
201 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
202 } else {
203 rx_desc->read.pkt_addr =
204 cpu_to_le64(buffer_info->dma);
205 rx_desc->read.hdr_addr = 0;
206 }
207
208 i++;
209 if (i == rx_ring->count)
210 i = 0;
211 buffer_info = &rx_ring->buffer_info[i];
212 }
213
214 no_buffers:
215 if (rx_ring->next_to_use != i) {
216 rx_ring->next_to_use = i;
217 if (i == 0)
218 i = (rx_ring->count - 1);
219 else
220 i--;
221
222 /* Force memory writes to complete before letting h/w
223 * know there are new descriptors to fetch. (Only
224 * applicable for weak-ordered memory model archs,
225 * such as IA-64). */
226 wmb();
227 writel(i, adapter->hw.hw_addr + rx_ring->tail);
228 }
229 }
230
231 /**
232 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 233 * @adapter: board private structure
 * @work_done: cumulative count of packets processed, updated by this function
 * @work_to_do: NAPI budget; the maximum number of packets to process
234 *
235 * the return value indicates whether actual cleaning was done, there
236 * is no guarantee that everything was cleaned
237 **/
238 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
239 int *work_done, int work_to_do)
240 {
241 struct igbvf_ring *rx_ring = adapter->rx_ring;
242 struct net_device *netdev = adapter->netdev;
243 struct pci_dev *pdev = adapter->pdev;
244 union e1000_adv_rx_desc *rx_desc, *next_rxd;
245 struct igbvf_buffer *buffer_info, *next_buffer;
246 struct sk_buff *skb;
247 bool cleaned = false;
248 int cleaned_count = 0;
249 unsigned int total_bytes = 0, total_packets = 0;
250 unsigned int i;
251 u32 length, hlen, staterr;
252
253 i = rx_ring->next_to_clean;
254 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
255 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
256
257 while (staterr & E1000_RXD_STAT_DD) {
258 if (*work_done >= work_to_do)
259 break;
260 (*work_done)++;
261 rmb(); /* read descriptor and rx_buffer_info after status DD */
262
263 buffer_info = &rx_ring->buffer_info[i];
264
265 /* HW will not DMA in data larger than the given buffer, even
266 * if it parses the (NFS, of course) header to be larger. In
267 * that case, it fills the header buffer and spills the rest
268 * into the page.
269 */
270 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
271 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
272 if (hlen > adapter->rx_ps_hdr_size)
273 hlen = adapter->rx_ps_hdr_size;
274
275 length = le16_to_cpu(rx_desc->wb.upper.length);
276 cleaned = true;
277 cleaned_count++;
278
279 skb = buffer_info->skb;
280 prefetch(skb->data - NET_IP_ALIGN);
281 buffer_info->skb = NULL;
282 if (!adapter->rx_ps_hdr_size) {
283 dma_unmap_single(&pdev->dev, buffer_info->dma,
284 adapter->rx_buffer_len,
285 DMA_FROM_DEVICE);
286 buffer_info->dma = 0;
287 skb_put(skb, length);
288 goto send_up;
289 }
290
291 if (!skb_shinfo(skb)->nr_frags) {
292 dma_unmap_single(&pdev->dev, buffer_info->dma,
293 adapter->rx_ps_hdr_size,
294 DMA_FROM_DEVICE);
295 skb_put(skb, hlen);
296 }
297
298 if (length) {
299 dma_unmap_page(&pdev->dev, buffer_info->page_dma,
300 PAGE_SIZE / 2,
301 DMA_FROM_DEVICE);
302 buffer_info->page_dma = 0;
303
304 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
305 buffer_info->page,
306 buffer_info->page_offset,
307 length);
308
309 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
310 (page_count(buffer_info->page) != 1))
311 buffer_info->page = NULL;
312 else
313 get_page(buffer_info->page);
314
315 skb->len += length;
316 skb->data_len += length;
317 skb->truesize += PAGE_SIZE / 2;
318 }
319 send_up:
320 i++;
321 if (i == rx_ring->count)
322 i = 0;
323 next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
324 prefetch(next_rxd);
325 next_buffer = &rx_ring->buffer_info[i];
326
327 if (!(staterr & E1000_RXD_STAT_EOP)) {
328 buffer_info->skb = next_buffer->skb;
329 buffer_info->dma = next_buffer->dma;
330 next_buffer->skb = skb;
331 next_buffer->dma = 0;
332 goto next_desc;
333 }
334
335 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
336 dev_kfree_skb_irq(skb);
337 goto next_desc;
338 }
339
340 total_bytes += skb->len;
341 total_packets++;
342
343 igbvf_rx_checksum_adv(adapter, staterr, skb);
344
345 skb->protocol = eth_type_trans(skb, netdev);
346
347 igbvf_receive_skb(adapter, netdev, skb, staterr,
348 rx_desc->wb.upper.vlan);
349
350 next_desc:
351 rx_desc->wb.upper.status_error = 0;
352
353 /* return some buffers to hardware, one at a time is too slow */
354 if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
355 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
356 cleaned_count = 0;
357 }
358
359 /* use prefetched values */
360 rx_desc = next_rxd;
361 buffer_info = next_buffer;
362
363 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
364 }
365
366 rx_ring->next_to_clean = i;
367 cleaned_count = igbvf_desc_unused(rx_ring);
368
369 if (cleaned_count)
370 igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
371
372 adapter->total_rx_packets += total_packets;
373 adapter->total_rx_bytes += total_bytes;
374 adapter->net_stats.rx_bytes += total_bytes;
375 adapter->net_stats.rx_packets += total_packets;
376 return cleaned;
377 }
378
379 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
380 struct igbvf_buffer *buffer_info)
381 {
382 if (buffer_info->dma) {
383 if (buffer_info->mapped_as_page)
384 dma_unmap_page(&adapter->pdev->dev,
385 buffer_info->dma,
386 buffer_info->length,
387 DMA_TO_DEVICE);
388 else
389 dma_unmap_single(&adapter->pdev->dev,
390 buffer_info->dma,
391 buffer_info->length,
392 DMA_TO_DEVICE);
393 buffer_info->dma = 0;
394 }
395 if (buffer_info->skb) {
396 dev_kfree_skb_any(buffer_info->skb);
397 buffer_info->skb = NULL;
398 }
399 buffer_info->time_stamp = 0;
400 }
401
402 /**
403 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
404 * @adapter: board private structure
405 *
406 * Return 0 on success, negative on failure
407 **/
408 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
409 struct igbvf_ring *tx_ring)
410 {
411 struct pci_dev *pdev = adapter->pdev;
412 int size;
413
414 size = sizeof(struct igbvf_buffer) * tx_ring->count;
415 tx_ring->buffer_info = vzalloc(size);
416 if (!tx_ring->buffer_info)
417 goto err;
418
419 /* round up to nearest 4K */
420 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
421 tx_ring->size = ALIGN(tx_ring->size, 4096);
422
423 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
424 &tx_ring->dma, GFP_KERNEL);
425
426 if (!tx_ring->desc)
427 goto err;
428
429 tx_ring->adapter = adapter;
430 tx_ring->next_to_use = 0;
431 tx_ring->next_to_clean = 0;
432
433 return 0;
434 err:
435 vfree(tx_ring->buffer_info);
436 dev_err(&adapter->pdev->dev,
437 "Unable to allocate memory for the transmit descriptor ring\n");
438 return -ENOMEM;
439 }
440
441 /**
442 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
443 * @adapter: board private structure
444 *
445 * Returns 0 on success, negative on failure
446 **/
447 int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
448 struct igbvf_ring *rx_ring)
449 {
450 struct pci_dev *pdev = adapter->pdev;
451 int size, desc_len;
452
453 size = sizeof(struct igbvf_buffer) * rx_ring->count;
454 rx_ring->buffer_info = vzalloc(size);
455 if (!rx_ring->buffer_info)
456 goto err;
457
458 desc_len = sizeof(union e1000_adv_rx_desc);
459
460 /* Round up to nearest 4K */
461 rx_ring->size = rx_ring->count * desc_len;
462 rx_ring->size = ALIGN(rx_ring->size, 4096);
463
464 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
465 &rx_ring->dma, GFP_KERNEL);
466
467 if (!rx_ring->desc)
468 goto err;
469
470 rx_ring->next_to_clean = 0;
471 rx_ring->next_to_use = 0;
472
473 rx_ring->adapter = adapter;
474
475 return 0;
476
477 err:
478 vfree(rx_ring->buffer_info);
479 rx_ring->buffer_info = NULL;
480 dev_err(&adapter->pdev->dev,
481 "Unable to allocate memory for the receive descriptor ring\n");
482 return -ENOMEM;
483 }
484
485 /**
486 * igbvf_clean_tx_ring - Free Tx Buffers
487 * @tx_ring: ring to be cleaned
488 **/
489 static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
490 {
491 struct igbvf_adapter *adapter = tx_ring->adapter;
492 struct igbvf_buffer *buffer_info;
493 unsigned long size;
494 unsigned int i;
495
496 if (!tx_ring->buffer_info)
497 return;
498
499 /* Free all the Tx ring sk_buffs */
500 for (i = 0; i < tx_ring->count; i++) {
501 buffer_info = &tx_ring->buffer_info[i];
502 igbvf_put_txbuf(adapter, buffer_info);
503 }
504
505 size = sizeof(struct igbvf_buffer) * tx_ring->count;
506 memset(tx_ring->buffer_info, 0, size);
507
508 /* Zero out the descriptor ring */
509 memset(tx_ring->desc, 0, tx_ring->size);
510
511 tx_ring->next_to_use = 0;
512 tx_ring->next_to_clean = 0;
513
514 writel(0, adapter->hw.hw_addr + tx_ring->head);
515 writel(0, adapter->hw.hw_addr + tx_ring->tail);
516 }
517
518 /**
519 * igbvf_free_tx_resources - Free Tx Resources per Queue
520 * @tx_ring: ring to free resources from
521 *
522 * Free all transmit software resources
523 **/
524 void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
525 {
526 struct pci_dev *pdev = tx_ring->adapter->pdev;
527
528 igbvf_clean_tx_ring(tx_ring);
529
530 vfree(tx_ring->buffer_info);
531 tx_ring->buffer_info = NULL;
532
533 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
534 tx_ring->dma);
535
536 tx_ring->desc = NULL;
537 }
538
539 /**
540 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 541 * @rx_ring: ring to be cleaned
542 **/
543 static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
544 {
545 struct igbvf_adapter *adapter = rx_ring->adapter;
546 struct igbvf_buffer *buffer_info;
547 struct pci_dev *pdev = adapter->pdev;
548 unsigned long size;
549 unsigned int i;
550
551 if (!rx_ring->buffer_info)
552 return;
553
554 /* Free all the Rx ring sk_buffs */
555 for (i = 0; i < rx_ring->count; i++) {
556 buffer_info = &rx_ring->buffer_info[i];
557 if (buffer_info->dma) {
 558 if (adapter->rx_ps_hdr_size) {
559 dma_unmap_single(&pdev->dev, buffer_info->dma,
560 adapter->rx_ps_hdr_size,
561 DMA_FROM_DEVICE);
562 } else {
563 dma_unmap_single(&pdev->dev, buffer_info->dma,
564 adapter->rx_buffer_len,
565 DMA_FROM_DEVICE);
566 }
567 buffer_info->dma = 0;
568 }
569
570 if (buffer_info->skb) {
571 dev_kfree_skb(buffer_info->skb);
572 buffer_info->skb = NULL;
573 }
574
575 if (buffer_info->page) {
576 if (buffer_info->page_dma)
577 dma_unmap_page(&pdev->dev,
578 buffer_info->page_dma,
579 PAGE_SIZE / 2,
580 DMA_FROM_DEVICE);
581 put_page(buffer_info->page);
582 buffer_info->page = NULL;
583 buffer_info->page_dma = 0;
584 buffer_info->page_offset = 0;
585 }
586 }
587
588 size = sizeof(struct igbvf_buffer) * rx_ring->count;
589 memset(rx_ring->buffer_info, 0, size);
590
591 /* Zero out the descriptor ring */
592 memset(rx_ring->desc, 0, rx_ring->size);
593
594 rx_ring->next_to_clean = 0;
595 rx_ring->next_to_use = 0;
596
597 writel(0, adapter->hw.hw_addr + rx_ring->head);
598 writel(0, adapter->hw.hw_addr + rx_ring->tail);
599 }
600
601 /**
602 * igbvf_free_rx_resources - Free Rx Resources
603 * @rx_ring: ring to clean the resources from
604 *
605 * Free all receive software resources
606 **/
607
608 void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
609 {
610 struct pci_dev *pdev = rx_ring->adapter->pdev;
611
612 igbvf_clean_rx_ring(rx_ring);
613
614 vfree(rx_ring->buffer_info);
615 rx_ring->buffer_info = NULL;
616
617 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
618 rx_ring->dma);
619 rx_ring->desc = NULL;
620 }
621
622 /**
623 * igbvf_update_itr - update the dynamic ITR value based on statistics
624 * @adapter: pointer to adapter
625 * @itr_setting: current adapter->itr
626 * @packets: the number of packets during this measurement interval
627 * @bytes: the number of bytes during this measurement interval
628 *
629 * Stores a new ITR value based on packets and byte
630 * counts during the last interrupt. The advantage of per interrupt
631 * computation is faster updates and more accurate ITR for the current
632 * traffic pattern. Constants in this function were computed
633 * based on theoretical maximum wire speed and thresholds were set based
634 * on testing data as well as attempting to minimize response time
635 * while increasing bulk throughput. This functionality is controlled
636 * by the InterruptThrottleRate module parameter.
637 **/
638 static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
639 u16 itr_setting, int packets,
640 int bytes)
641 {
642 unsigned int retval = itr_setting;
643
644 if (packets == 0)
645 goto update_itr_done;
646
647 switch (itr_setting) {
648 case lowest_latency:
649 /* handle TSO and jumbo frames */
650 if (bytes/packets > 8000)
651 retval = bulk_latency;
652 else if ((packets < 5) && (bytes > 512))
653 retval = low_latency;
654 break;
655 case low_latency: /* 50 usec aka 20000 ints/s */
656 if (bytes > 10000) {
657 /* this if handles the TSO accounting */
658 if (bytes/packets > 8000)
659 retval = bulk_latency;
660 else if ((packets < 10) || ((bytes/packets) > 1200))
661 retval = bulk_latency;
662 else if ((packets > 35))
663 retval = lowest_latency;
664 } else if (bytes/packets > 2000) {
665 retval = bulk_latency;
666 } else if (packets <= 2 && bytes < 512) {
667 retval = lowest_latency;
668 }
669 break;
670 case bulk_latency: /* 250 usec aka 4000 ints/s */
671 if (bytes > 25000) {
672 if (packets > 35)
673 retval = low_latency;
674 } else if (bytes < 6000) {
675 retval = low_latency;
676 }
677 break;
678 }
679
680 update_itr_done:
681 return retval;
682 }
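/* Worked examples of the thresholds above (illustrative): in
 * low_latency, 40 packets totalling 40000 bytes gives bytes/packets =
 * 1000 with packets > 35, so the state moves to lowest_latency; 5 TSO
 * frames totalling 45000 bytes gives bytes/packets = 9000 > 8000 and
 * pushes the state to bulk_latency instead. */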
683
684 static void igbvf_set_itr(struct igbvf_adapter *adapter)
685 {
686 struct e1000_hw *hw = &adapter->hw;
687 u16 current_itr;
688 u32 new_itr = adapter->itr;
689
690 adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
691 adapter->total_tx_packets,
692 adapter->total_tx_bytes);
693 /* conservative mode (itr 3) eliminates the lowest_latency setting */
694 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
695 adapter->tx_itr = low_latency;
696
697 adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
698 adapter->total_rx_packets,
699 adapter->total_rx_bytes);
700 /* conservative mode (itr 3) eliminates the lowest_latency setting */
701 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
702 adapter->rx_itr = low_latency;
703
704 current_itr = max(adapter->rx_itr, adapter->tx_itr);
705
706 switch (current_itr) {
707 /* counts and packets in update_itr are dependent on these numbers */
708 case lowest_latency:
709 new_itr = 70000;
710 break;
711 case low_latency:
712 new_itr = 20000; /* aka hwitr = ~200 */
713 break;
714 case bulk_latency:
715 new_itr = 4000;
716 break;
717 default:
718 break;
719 }
720
721 if (new_itr != adapter->itr) {
722 /*
723 * this attempts to bias the interrupt rate towards Bulk
724 * by adding intermediate steps when interrupt rate is
725 * increasing
726 */
727 new_itr = new_itr > adapter->itr ?
728 min(adapter->itr + (new_itr >> 2), new_itr) :
729 new_itr;
730 adapter->itr = new_itr;
731 adapter->rx_ring->itr_val = 1952;
732
733 if (adapter->msix_entries)
734 adapter->rx_ring->set_itr = 1;
735 else
736 ew32(ITR, 1952);
737 }
738 }
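/* Note that the computed new_itr only updates adapter->itr; the value
 * actually programmed (itr_val or the ITR register) is the fixed
 * default 1952.  How 1952 translates to an interrupt rate depends on
 * the EITR encoding for this hardware and is not derived here. */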
739
740 /**
741 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 742 * @tx_ring: ring structure to clean descriptors from
743 * returns true if ring is completely cleaned
744 **/
745 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
746 {
747 struct igbvf_adapter *adapter = tx_ring->adapter;
748 struct net_device *netdev = adapter->netdev;
749 struct igbvf_buffer *buffer_info;
750 struct sk_buff *skb;
751 union e1000_adv_tx_desc *tx_desc, *eop_desc;
752 unsigned int total_bytes = 0, total_packets = 0;
753 unsigned int i, eop, count = 0;
754 bool cleaned = false;
755
756 i = tx_ring->next_to_clean;
757 eop = tx_ring->buffer_info[i].next_to_watch;
758 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
759
760 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
761 (count < tx_ring->count)) {
762 rmb(); /* read buffer_info after eop_desc status */
763 for (cleaned = false; !cleaned; count++) {
764 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
765 buffer_info = &tx_ring->buffer_info[i];
766 cleaned = (i == eop);
767 skb = buffer_info->skb;
768
769 if (skb) {
770 unsigned int segs, bytecount;
771
772 /* gso_segs is currently only valid for tcp */
773 segs = skb_shinfo(skb)->gso_segs ?: 1;
774 /* multiply data chunks by size of headers */
775 bytecount = ((segs - 1) * skb_headlen(skb)) +
776 skb->len;
777 total_packets += segs;
778 total_bytes += bytecount;
779 }
780
781 igbvf_put_txbuf(adapter, buffer_info);
782 tx_desc->wb.status = 0;
783
784 i++;
785 if (i == tx_ring->count)
786 i = 0;
787 }
788 eop = tx_ring->buffer_info[i].next_to_watch;
789 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
790 }
791
792 tx_ring->next_to_clean = i;
793
794 if (unlikely(count &&
795 netif_carrier_ok(netdev) &&
796 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
797 /* Make sure that anybody stopping the queue after this
798 * sees the new next_to_clean.
799 */
800 smp_mb();
801 if (netif_queue_stopped(netdev) &&
802 !(test_bit(__IGBVF_DOWN, &adapter->state))) {
803 netif_wake_queue(netdev);
804 ++adapter->restart_queue;
805 }
806 }
807
808 adapter->net_stats.tx_bytes += total_bytes;
809 adapter->net_stats.tx_packets += total_packets;
810 return count < tx_ring->count;
811 }
812
813 static irqreturn_t igbvf_msix_other(int irq, void *data)
814 {
815 struct net_device *netdev = data;
816 struct igbvf_adapter *adapter = netdev_priv(netdev);
817 struct e1000_hw *hw = &adapter->hw;
818
819 adapter->int_counter1++;
820
821 netif_carrier_off(netdev);
822 hw->mac.get_link_status = 1;
823 if (!test_bit(__IGBVF_DOWN, &adapter->state))
824 mod_timer(&adapter->watchdog_timer, jiffies + 1);
825
826 ew32(EIMS, adapter->eims_other);
827
828 return IRQ_HANDLED;
829 }
830
831 static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
832 {
833 struct net_device *netdev = data;
834 struct igbvf_adapter *adapter = netdev_priv(netdev);
835 struct e1000_hw *hw = &adapter->hw;
836 struct igbvf_ring *tx_ring = adapter->tx_ring;
837
838
839 adapter->total_tx_bytes = 0;
840 adapter->total_tx_packets = 0;
841
842 /* auto mask will automatically reenable the interrupt when we write
843 * EICS */
844 if (!igbvf_clean_tx_irq(tx_ring))
845 /* Ring was not completely cleaned, so fire another interrupt */
846 ew32(EICS, tx_ring->eims_value);
847 else
848 ew32(EIMS, tx_ring->eims_value);
849
850 return IRQ_HANDLED;
851 }
852
853 static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
854 {
855 struct net_device *netdev = data;
856 struct igbvf_adapter *adapter = netdev_priv(netdev);
857
858 adapter->int_counter0++;
859
860 /* Write the ITR value calculated at the end of the
861 * previous interrupt.
862 */
863 if (adapter->rx_ring->set_itr) {
864 writel(adapter->rx_ring->itr_val,
865 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
866 adapter->rx_ring->set_itr = 0;
867 }
868
869 if (napi_schedule_prep(&adapter->rx_ring->napi)) {
870 adapter->total_rx_bytes = 0;
871 adapter->total_rx_packets = 0;
872 __napi_schedule(&adapter->rx_ring->napi);
873 }
874
875 return IRQ_HANDLED;
876 }
877
878 #define IGBVF_NO_QUEUE -1
879
880 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
881 int tx_queue, int msix_vector)
882 {
883 struct e1000_hw *hw = &adapter->hw;
884 u32 ivar, index;
885
886 /* 82576 uses a table-based method for assigning vectors.
887 Each queue has a single entry in the table to which we write
888 a vector number along with a "valid" bit. Sadly, the layout
889 of the table is somewhat counterintuitive. */
890 if (rx_queue > IGBVF_NO_QUEUE) {
891 index = (rx_queue >> 1);
892 ivar = array_er32(IVAR0, index);
893 if (rx_queue & 0x1) {
894 /* vector goes into third byte of register */
895 ivar = ivar & 0xFF00FFFF;
896 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
897 } else {
898 /* vector goes into low byte of register */
899 ivar = ivar & 0xFFFFFF00;
900 ivar |= msix_vector | E1000_IVAR_VALID;
901 }
902 adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
903 array_ew32(IVAR0, index, ivar);
904 }
905 if (tx_queue > IGBVF_NO_QUEUE) {
906 index = (tx_queue >> 1);
907 ivar = array_er32(IVAR0, index);
908 if (tx_queue & 0x1) {
909 /* vector goes into high byte of register */
910 ivar = ivar & 0x00FFFFFF;
911 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
912 } else {
913 /* vector goes into second byte of register */
914 ivar = ivar & 0xFFFF00FF;
915 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
916 }
917 adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
918 array_ew32(IVAR0, index, ivar);
919 }
920 }
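/* Resulting IVAR0 byte layout, as implied by the masks and shifts
 * above (each 32-bit entry covers queue pair 2n/2n+1):
 *   bits  7:0  Rx queue 2n      bits 15:8  Tx queue 2n
 *   bits 23:16 Rx queue 2n+1    bits 31:24 Tx queue 2n+1
 * with E1000_IVAR_VALID or'd in to mark a byte as valid. */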
921
922 /**
923 * igbvf_configure_msix - Configure MSI-X hardware
924 *
925 * igbvf_configure_msix sets up the hardware to properly
926 * generate MSI-X interrupts.
927 **/
928 static void igbvf_configure_msix(struct igbvf_adapter *adapter)
929 {
930 u32 tmp;
931 struct e1000_hw *hw = &adapter->hw;
932 struct igbvf_ring *tx_ring = adapter->tx_ring;
933 struct igbvf_ring *rx_ring = adapter->rx_ring;
934 int vector = 0;
935
936 adapter->eims_enable_mask = 0;
937
938 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
939 adapter->eims_enable_mask |= tx_ring->eims_value;
940 if (tx_ring->itr_val)
941 writel(tx_ring->itr_val,
942 hw->hw_addr + tx_ring->itr_register);
943 else
944 writel(1952, hw->hw_addr + tx_ring->itr_register);
945
946 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
947 adapter->eims_enable_mask |= rx_ring->eims_value;
948 if (rx_ring->itr_val)
949 writel(rx_ring->itr_val,
950 hw->hw_addr + rx_ring->itr_register);
951 else
952 writel(1952, hw->hw_addr + rx_ring->itr_register);
953
954 /* set vector for other causes, i.e. link changes */
955
956 tmp = (vector++ | E1000_IVAR_VALID);
957
958 ew32(IVAR_MISC, tmp);
959
960 adapter->eims_enable_mask = (1 << (vector)) - 1;
961 adapter->eims_other = 1 << (vector - 1);
962 e1e_flush();
963 }
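/* With one Tx, one Rx and one "other" cause assigned, vector ends up
 * at 3, so eims_enable_mask = (1 << 3) - 1 = 0x7 covers all three
 * vectors and eims_other = 1 << 2 = 0x4 is the link-change vector. */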
964
965 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
966 {
967 if (adapter->msix_entries) {
968 pci_disable_msix(adapter->pdev);
969 kfree(adapter->msix_entries);
970 adapter->msix_entries = NULL;
971 }
972 }
973
974 /**
975 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
976 *
977 * Attempt to configure interrupts using the best available
978 * capabilities of the hardware and kernel.
979 **/
980 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
981 {
982 int err = -ENOMEM;
983 int i;
984
 985 /* we allocate 3 vectors: one for Tx, one for Rx, and one for PF messages */
986 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
987 GFP_KERNEL);
988 if (adapter->msix_entries) {
989 for (i = 0; i < 3; i++)
990 adapter->msix_entries[i].entry = i;
991
992 err = pci_enable_msix(adapter->pdev,
993 adapter->msix_entries, 3);
994 }
995
996 if (err) {
997 /* MSI-X failed */
998 dev_err(&adapter->pdev->dev,
999 "Failed to initialize MSI-X interrupts.\n");
1000 igbvf_reset_interrupt_capability(adapter);
1001 }
1002 }
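/* On this kernel generation pci_enable_msix() returns 0 on success, a
 * negative errno on failure, or a positive count if fewer vectors are
 * available than requested; the driver needs all three vectors, so any
 * nonzero return is treated as failure. */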
1003
1004 /**
1005 * igbvf_request_msix - Initialize MSI-X interrupts
1006 *
1007 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1008 * kernel.
1009 **/
1010 static int igbvf_request_msix(struct igbvf_adapter *adapter)
1011 {
1012 struct net_device *netdev = adapter->netdev;
1013 int err = 0, vector = 0;
1014
1015 if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1016 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1017 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1018 } else {
1019 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1020 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1021 }
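/* The "-tx-0"/"-rx-0" suffixes are five characters, hence the
 * IFNAMSIZ - 5 check above; an over-long base name is used unmodified
 * for both vectors instead. */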
1022
1023 err = request_irq(adapter->msix_entries[vector].vector,
1024 igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1025 netdev);
1026 if (err)
1027 goto out;
1028
1029 adapter->tx_ring->itr_register = E1000_EITR(vector);
1030 adapter->tx_ring->itr_val = 1952;
1031 vector++;
1032
1033 err = request_irq(adapter->msix_entries[vector].vector,
1034 igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1035 netdev);
1036 if (err)
1037 goto out;
1038
1039 adapter->rx_ring->itr_register = E1000_EITR(vector);
1040 adapter->rx_ring->itr_val = 1952;
1041 vector++;
1042
1043 err = request_irq(adapter->msix_entries[vector].vector,
1044 igbvf_msix_other, 0, netdev->name, netdev);
1045 if (err)
1046 goto out;
1047
1048 igbvf_configure_msix(adapter);
1049 return 0;
1050 out:
1051 return err;
1052 }
1053
1054 /**
1055 * igbvf_alloc_queues - Allocate memory for all rings
1056 * @adapter: board private structure to initialize
1057 **/
1058 static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1059 {
1060 struct net_device *netdev = adapter->netdev;
1061
1062 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1063 if (!adapter->tx_ring)
1064 return -ENOMEM;
1065
1066 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1067 if (!adapter->rx_ring) {
1068 kfree(adapter->tx_ring);
1069 return -ENOMEM;
1070 }
1071
1072 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1073
1074 return 0;
1075 }
1076
1077 /**
1078 * igbvf_request_irq - initialize interrupts
1079 *
1080 * Attempts to configure interrupts using the best available
1081 * capabilities of the hardware and kernel.
1082 **/
1083 static int igbvf_request_irq(struct igbvf_adapter *adapter)
1084 {
1085 int err = -1;
1086
1087 /* igbvf supports msi-x only */
1088 if (adapter->msix_entries)
1089 err = igbvf_request_msix(adapter);
1090
1091 if (!err)
1092 return err;
1093
1094 dev_err(&adapter->pdev->dev,
1095 "Unable to allocate interrupt, Error: %d\n", err);
1096
1097 return err;
1098 }
1099
1100 static void igbvf_free_irq(struct igbvf_adapter *adapter)
1101 {
1102 struct net_device *netdev = adapter->netdev;
1103 int vector;
1104
1105 if (adapter->msix_entries) {
1106 for (vector = 0; vector < 3; vector++)
1107 free_irq(adapter->msix_entries[vector].vector, netdev);
1108 }
1109 }
1110
1111 /**
1112 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1113 **/
1114 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1115 {
1116 struct e1000_hw *hw = &adapter->hw;
1117
1118 ew32(EIMC, ~0);
1119
1120 if (adapter->msix_entries)
1121 ew32(EIAC, 0);
1122 }
1123
1124 /**
1125 * igbvf_irq_enable - Enable default interrupt generation settings
1126 **/
1127 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1128 {
1129 struct e1000_hw *hw = &adapter->hw;
1130
1131 ew32(EIAC, adapter->eims_enable_mask);
1132 ew32(EIAM, adapter->eims_enable_mask);
1133 ew32(EIMS, adapter->eims_enable_mask);
1134 }
1135
1136 /**
1137 * igbvf_poll - NAPI Rx polling callback
1138 * @napi: struct associated with this polling callback
 1139 * @budget: number of packets the driver may process in this poll
1140 **/
1141 static int igbvf_poll(struct napi_struct *napi, int budget)
1142 {
1143 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1144 struct igbvf_adapter *adapter = rx_ring->adapter;
1145 struct e1000_hw *hw = &adapter->hw;
1146 int work_done = 0;
1147
1148 igbvf_clean_rx_irq(adapter, &work_done, budget);
1149
1150 /* If not enough Rx work done, exit the polling mode */
1151 if (work_done < budget) {
1152 napi_complete(napi);
1153
1154 if (adapter->itr_setting & 3)
1155 igbvf_set_itr(adapter);
1156
1157 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1158 ew32(EIMS, adapter->rx_ring->eims_value);
1159 }
1160
1161 return work_done;
1162 }
1163
1164 /**
1165 * igbvf_set_rlpml - set receive large packet maximum length
1166 * @adapter: board private structure
1167 *
1168 * Configure the maximum size of packets that will be received
1169 */
1170 static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1171 {
1172 int max_frame_size;
1173 struct e1000_hw *hw = &adapter->hw;
1174
1175 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
1176 e1000_rlpml_set_vf(hw, max_frame_size);
1177 }
1178
1179 static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1180 {
1181 struct igbvf_adapter *adapter = netdev_priv(netdev);
1182 struct e1000_hw *hw = &adapter->hw;
1183
1184 if (hw->mac.ops.set_vfta(hw, vid, true)) {
1185 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1186 return -EINVAL;
1187 }
1188 set_bit(vid, adapter->active_vlans);
1189 return 0;
1190 }
1191
1192 static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1193 {
1194 struct igbvf_adapter *adapter = netdev_priv(netdev);
1195 struct e1000_hw *hw = &adapter->hw;
1196
1197 igbvf_irq_disable(adapter);
1198
1199 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1200 igbvf_irq_enable(adapter);
1201
1202 if (hw->mac.ops.set_vfta(hw, vid, false)) {
1203 dev_err(&adapter->pdev->dev,
1204 "Failed to remove vlan id %d\n", vid);
1205 return -EINVAL;
1206 }
1207 clear_bit(vid, adapter->active_vlans);
1208 return 0;
1209 }
1210
1211 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1212 {
1213 u16 vid;
1214
1215 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1216 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1217 }
1218
1219 /**
1220 * igbvf_configure_tx - Configure Transmit Unit after Reset
1221 * @adapter: board private structure
1222 *
1223 * Configure the Tx unit of the MAC after a reset.
1224 **/
1225 static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1226 {
1227 struct e1000_hw *hw = &adapter->hw;
1228 struct igbvf_ring *tx_ring = adapter->tx_ring;
1229 u64 tdba;
1230 u32 txdctl, dca_txctrl;
1231
1232 /* disable transmits */
1233 txdctl = er32(TXDCTL(0));
1234 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1235 e1e_flush();
1236 msleep(10);
1237
1238 /* Setup the HW Tx Head and Tail descriptor pointers */
1239 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1240 tdba = tx_ring->dma;
1241 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1242 ew32(TDBAH(0), (tdba >> 32));
1243 ew32(TDH(0), 0);
1244 ew32(TDT(0), 0);
1245 tx_ring->head = E1000_TDH(0);
1246 tx_ring->tail = E1000_TDT(0);
1247
1248 /* Turn off Relaxed Ordering on head write-backs. The writebacks
1249 * MUST be delivered in order or it will completely screw up
 1250 * our bookkeeping.
1251 */
1252 dca_txctrl = er32(DCA_TXCTRL(0));
1253 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1254 ew32(DCA_TXCTRL(0), dca_txctrl);
1255
1256 /* enable transmits */
1257 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1258 ew32(TXDCTL(0), txdctl);
1259
1260 /* Setup Transmit Descriptor Settings for eop descriptor */
1261 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1262
1263 /* enable Report Status bit */
1264 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1265 }
1266
1267 /**
1268 * igbvf_setup_srrctl - configure the receive control registers
1269 * @adapter: Board private structure
1270 **/
1271 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1272 {
1273 struct e1000_hw *hw = &adapter->hw;
1274 u32 srrctl = 0;
1275
1276 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1277 E1000_SRRCTL_BSIZEHDR_MASK |
1278 E1000_SRRCTL_BSIZEPKT_MASK);
1279
1280 /* Enable queue drop to avoid head of line blocking */
1281 srrctl |= E1000_SRRCTL_DROP_EN;
1282
1283 /* Setup buffer sizes */
1284 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1285 E1000_SRRCTL_BSIZEPKT_SHIFT;
1286
1287 if (adapter->rx_buffer_len < 2048) {
1288 adapter->rx_ps_hdr_size = 0;
1289 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1290 } else {
1291 adapter->rx_ps_hdr_size = 128;
1292 srrctl |= adapter->rx_ps_hdr_size <<
1293 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1294 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1295 }
1296
1297 ew32(SRRCTL(0), srrctl);
1298 }
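/* Sizing example, assuming the usual E1000_SRRCTL_BSIZEPKT_SHIFT of 10
 * (1 KiB units): the default 1522-byte buffer rounds up to 2048 and is
 * encoded as 2.  Being under 2048 bytes it also selects the
 * single-buffer descriptor format, with header split reserved for
 * larger (jumbo) buffer sizes. */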
1299
1300 /**
1301 * igbvf_configure_rx - Configure Receive Unit after Reset
1302 * @adapter: board private structure
1303 *
1304 * Configure the Rx unit of the MAC after a reset.
1305 **/
1306 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1307 {
1308 struct e1000_hw *hw = &adapter->hw;
1309 struct igbvf_ring *rx_ring = adapter->rx_ring;
1310 u64 rdba;
1311 u32 rdlen, rxdctl;
1312
1313 /* disable receives */
1314 rxdctl = er32(RXDCTL(0));
1315 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1316 e1e_flush();
1317 msleep(10);
1318
1319 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1320
1321 /*
1322 * Setup the HW Rx Head and Tail Descriptor Pointers and
1323 * the Base and Length of the Rx Descriptor Ring
1324 */
1325 rdba = rx_ring->dma;
1326 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1327 ew32(RDBAH(0), (rdba >> 32));
1328 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1329 rx_ring->head = E1000_RDH(0);
1330 rx_ring->tail = E1000_RDT(0);
1331 ew32(RDH(0), 0);
1332 ew32(RDT(0), 0);
1333
1334 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1335 rxdctl &= 0xFFF00000;
1336 rxdctl |= IGBVF_RX_PTHRESH;
1337 rxdctl |= IGBVF_RX_HTHRESH << 8;
1338 rxdctl |= IGBVF_RX_WTHRESH << 16;
1339
1340 igbvf_set_rlpml(adapter);
1341
1342 /* enable receives */
1343 ew32(RXDCTL(0), rxdctl);
1344 }
1345
1346 /**
1347 * igbvf_set_multi - Multicast and Promiscuous mode set
1348 * @netdev: network interface device structure
1349 *
1350 * The set_multi entry point is called whenever the multicast address
1351 * list or the network interface flags are updated. This routine is
1352 * responsible for configuring the hardware for proper multicast,
1353 * promiscuous mode, and all-multi behavior.
1354 **/
1355 static void igbvf_set_multi(struct net_device *netdev)
1356 {
1357 struct igbvf_adapter *adapter = netdev_priv(netdev);
1358 struct e1000_hw *hw = &adapter->hw;
1359 struct netdev_hw_addr *ha;
1360 u8 *mta_list = NULL;
1361 int i;
1362
1363 if (!netdev_mc_empty(netdev)) {
1364 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
1365 if (!mta_list) {
1366 dev_err(&adapter->pdev->dev,
1367 "failed to allocate multicast filter list\n");
1368 return;
1369 }
1370 }
1371
1372 /* prepare a packed array of only addresses. */
1373 i = 0;
1374 netdev_for_each_mc_addr(ha, netdev)
1375 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1376
1377 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1378 kfree(mta_list);
1379 }
1380
1381 /**
1382 * igbvf_configure - configure the hardware for Rx and Tx
1383 * @adapter: private board structure
1384 **/
1385 static void igbvf_configure(struct igbvf_adapter *adapter)
1386 {
1387 igbvf_set_multi(adapter->netdev);
1388
1389 igbvf_restore_vlan(adapter);
1390
1391 igbvf_configure_tx(adapter);
1392 igbvf_setup_srrctl(adapter);
1393 igbvf_configure_rx(adapter);
1394 igbvf_alloc_rx_buffers(adapter->rx_ring,
1395 igbvf_desc_unused(adapter->rx_ring));
1396 }
1397
1398 /* igbvf_reset - bring the hardware into a known good state
1399 *
1400 * This function boots the hardware and enables some settings that
1401 * require a configuration cycle of the hardware - those cannot be
1402 * set/changed during runtime. After reset the device needs to be
1403 * properly configured for Rx, Tx etc.
1404 */
1405 static void igbvf_reset(struct igbvf_adapter *adapter)
1406 {
1407 struct e1000_mac_info *mac = &adapter->hw.mac;
1408 struct net_device *netdev = adapter->netdev;
1409 struct e1000_hw *hw = &adapter->hw;
1410
1411 /* Allow time for pending master requests to run */
1412 if (mac->ops.reset_hw(hw))
1413 dev_err(&adapter->pdev->dev, "PF still resetting\n");
1414
1415 mac->ops.init_hw(hw);
1416
1417 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1418 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1419 netdev->addr_len);
1420 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1421 netdev->addr_len);
1422 }
1423
1424 adapter->last_reset = jiffies;
1425 }
1426
1427 int igbvf_up(struct igbvf_adapter *adapter)
1428 {
1429 struct e1000_hw *hw = &adapter->hw;
1430
1431 /* hardware has been reset, we need to reload some things */
1432 igbvf_configure(adapter);
1433
1434 clear_bit(__IGBVF_DOWN, &adapter->state);
1435
1436 napi_enable(&adapter->rx_ring->napi);
1437 if (adapter->msix_entries)
1438 igbvf_configure_msix(adapter);
1439
1440 /* Clear any pending interrupts. */
1441 er32(EICR);
1442 igbvf_irq_enable(adapter);
1443
1444 /* start the watchdog */
1445 hw->mac.get_link_status = 1;
1446 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1447
1448
1449 return 0;
1450 }
1451
1452 void igbvf_down(struct igbvf_adapter *adapter)
1453 {
1454 struct net_device *netdev = adapter->netdev;
1455 struct e1000_hw *hw = &adapter->hw;
1456 u32 rxdctl, txdctl;
1457
1458 /*
1459 * signal that we're down so the interrupt handler does not
1460 * reschedule our watchdog timer
1461 */
1462 set_bit(__IGBVF_DOWN, &adapter->state);
1463
1464 /* disable receives in the hardware */
1465 rxdctl = er32(RXDCTL(0));
1466 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1467
1468 netif_stop_queue(netdev);
1469
1470 /* disable transmits in the hardware */
1471 txdctl = er32(TXDCTL(0));
1472 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1473
1474 /* flush both disables and wait for them to finish */
1475 e1e_flush();
1476 msleep(10);
1477
1478 napi_disable(&adapter->rx_ring->napi);
1479
1480 igbvf_irq_disable(adapter);
1481
1482 del_timer_sync(&adapter->watchdog_timer);
1483
1484 netif_carrier_off(netdev);
1485
 1486 /* record the stats before reset */
1487 igbvf_update_stats(adapter);
1488
1489 adapter->link_speed = 0;
1490 adapter->link_duplex = 0;
1491
1492 igbvf_reset(adapter);
1493 igbvf_clean_tx_ring(adapter->tx_ring);
1494 igbvf_clean_rx_ring(adapter->rx_ring);
1495 }
1496
1497 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1498 {
1499 might_sleep();
1500 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1501 msleep(1);
1502 igbvf_down(adapter);
1503 igbvf_up(adapter);
1504 clear_bit(__IGBVF_RESETTING, &adapter->state);
1505 }
1506
1507 /**
1508 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1509 * @adapter: board private structure to initialize
1510 *
1511 * igbvf_sw_init initializes the Adapter private data structure.
1512 * Fields are initialized based on PCI device information and
1513 * OS network device settings (MTU size).
1514 **/
1515 static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1516 {
1517 struct net_device *netdev = adapter->netdev;
1518 s32 rc;
1519
1520 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1521 adapter->rx_ps_hdr_size = 0;
1522 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1523 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1524
1525 adapter->tx_int_delay = 8;
1526 adapter->tx_abs_int_delay = 32;
1527 adapter->rx_int_delay = 0;
1528 adapter->rx_abs_int_delay = 8;
1529 adapter->itr_setting = 3;
1530 adapter->itr = 20000;
1531
1532 /* Set various function pointers */
1533 adapter->ei->init_ops(&adapter->hw);
1534
1535 rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1536 if (rc)
1537 return rc;
1538
1539 rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1540 if (rc)
1541 return rc;
1542
1543 igbvf_set_interrupt_capability(adapter);
1544
1545 if (igbvf_alloc_queues(adapter))
1546 return -ENOMEM;
1547
1548 spin_lock_init(&adapter->tx_queue_lock);
1549
1550 /* Explicitly disable IRQ since the NIC can be in any state. */
1551 igbvf_irq_disable(adapter);
1552
1553 spin_lock_init(&adapter->stats_lock);
1554
1555 set_bit(__IGBVF_DOWN, &adapter->state);
1556 return 0;
1557 }
1558
1559 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1560 {
1561 struct e1000_hw *hw = &adapter->hw;
1562
1563 adapter->stats.last_gprc = er32(VFGPRC);
1564 adapter->stats.last_gorc = er32(VFGORC);
1565 adapter->stats.last_gptc = er32(VFGPTC);
1566 adapter->stats.last_gotc = er32(VFGOTC);
1567 adapter->stats.last_mprc = er32(VFMPRC);
1568 adapter->stats.last_gotlbc = er32(VFGOTLBC);
1569 adapter->stats.last_gptlbc = er32(VFGPTLBC);
1570 adapter->stats.last_gorlbc = er32(VFGORLBC);
1571 adapter->stats.last_gprlbc = er32(VFGPRLBC);
1572
1573 adapter->stats.base_gprc = er32(VFGPRC);
1574 adapter->stats.base_gorc = er32(VFGORC);
1575 adapter->stats.base_gptc = er32(VFGPTC);
1576 adapter->stats.base_gotc = er32(VFGOTC);
1577 adapter->stats.base_mprc = er32(VFMPRC);
1578 adapter->stats.base_gotlbc = er32(VFGOTLBC);
1579 adapter->stats.base_gptlbc = er32(VFGPTLBC);
1580 adapter->stats.base_gorlbc = er32(VFGORLBC);
1581 adapter->stats.base_gprlbc = er32(VFGPRLBC);
1582 }
1583
1584 /**
1585 * igbvf_open - Called when a network interface is made active
1586 * @netdev: network interface device structure
1587 *
1588 * Returns 0 on success, negative value on failure
1589 *
1590 * The open entry point is called when a network interface is made
1591 * active by the system (IFF_UP). At this point all resources needed
1592 * for transmit and receive operations are allocated, the interrupt
1593 * handler is registered with the OS, the watchdog timer is started,
1594 * and the stack is notified that the interface is ready.
1595 **/
1596 static int igbvf_open(struct net_device *netdev)
1597 {
1598 struct igbvf_adapter *adapter = netdev_priv(netdev);
1599 struct e1000_hw *hw = &adapter->hw;
1600 int err;
1601
1602 /* disallow open during test */
1603 if (test_bit(__IGBVF_TESTING, &adapter->state))
1604 return -EBUSY;
1605
1606 /* allocate transmit descriptors */
1607 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1608 if (err)
1609 goto err_setup_tx;
1610
1611 /* allocate receive descriptors */
1612 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1613 if (err)
1614 goto err_setup_rx;
1615
1616 /*
1617 * before we allocate an interrupt, we must be ready to handle it.
1618 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1619 * as soon as we call pci_request_irq, so we have to setup our
1620 * clean_rx handler before we do so.
1621 */
1622 igbvf_configure(adapter);
1623
1624 err = igbvf_request_irq(adapter);
1625 if (err)
1626 goto err_req_irq;
1627
1628 /* From here on the code is the same as igbvf_up() */
1629 clear_bit(__IGBVF_DOWN, &adapter->state);
1630
1631 napi_enable(&adapter->rx_ring->napi);
1632
1633 /* clear any pending interrupts */
1634 er32(EICR);
1635
1636 igbvf_irq_enable(adapter);
1637
1638 /* start the watchdog */
1639 hw->mac.get_link_status = 1;
1640 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1641
1642 return 0;
1643
1644 err_req_irq:
1645 igbvf_free_rx_resources(adapter->rx_ring);
1646 err_setup_rx:
1647 igbvf_free_tx_resources(adapter->tx_ring);
1648 err_setup_tx:
1649 igbvf_reset(adapter);
1650
1651 return err;
1652 }
1653
1654 /**
1655 * igbvf_close - Disables a network interface
1656 * @netdev: network interface device structure
1657 *
1658 * Returns 0, this is not allowed to fail
1659 *
1660 * The close entry point is called when an interface is de-activated
1661 * by the OS. The hardware is still under the drivers control, but
1662 * needs to be disabled. A global MAC reset is issued to stop the
1663 * hardware, and all transmit and receive resources are freed.
1664 **/
1665 static int igbvf_close(struct net_device *netdev)
1666 {
1667 struct igbvf_adapter *adapter = netdev_priv(netdev);
1668
1669 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1670 igbvf_down(adapter);
1671
1672 igbvf_free_irq(adapter);
1673
1674 igbvf_free_tx_resources(adapter->tx_ring);
1675 igbvf_free_rx_resources(adapter->rx_ring);
1676
1677 return 0;
1678 }
1679 /**
1680 * igbvf_set_mac - Change the Ethernet Address of the NIC
1681 * @netdev: network interface device structure
1682 * @p: pointer to an address structure
1683 *
1684 * Returns 0 on success, negative on failure
1685 **/
1686 static int igbvf_set_mac(struct net_device *netdev, void *p)
1687 {
1688 struct igbvf_adapter *adapter = netdev_priv(netdev);
1689 struct e1000_hw *hw = &adapter->hw;
1690 struct sockaddr *addr = p;
1691
1692 if (!is_valid_ether_addr(addr->sa_data))
1693 return -EADDRNOTAVAIL;
1694
1695 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1696
1697 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1698
1699 if (memcmp(addr->sa_data, hw->mac.addr, 6))
1700 return -EADDRNOTAVAIL;
1701
1702 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1703
1704 return 0;
1705 }
1706
1707 #define UPDATE_VF_COUNTER(reg, name) \
1708 { \
1709 u32 current_counter = er32(reg); \
1710 if (current_counter < adapter->stats.last_##name) \
1711 adapter->stats.name += 0x100000000LL; \
1712 adapter->stats.last_##name = current_counter; \
1713 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1714 adapter->stats.name |= current_counter; \
1715 }
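/* The VF statistics registers are 32-bit rolling counters that this
 * macro extends to 64 bits in software.  Illustrative wrap: if
 * last_gprc = 0xFFFFFFF0 and the register now reads 0x10, the "<" test
 * fires, 2^32 is added to the accumulated value, and the low 32 bits
 * are then replaced with the current reading. */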
1716
1717 /**
1718 * igbvf_update_stats - Update the board statistics counters
1719 * @adapter: board private structure
1720 **/
1721 void igbvf_update_stats(struct igbvf_adapter *adapter)
1722 {
1723 struct e1000_hw *hw = &adapter->hw;
1724 struct pci_dev *pdev = adapter->pdev;
1725
1726 /*
1727 * Prevent stats update while adapter is being reset, link is down
1728 * or if the pci connection is down.
1729 */
1730 if (adapter->link_speed == 0)
1731 return;
1732
1733 if (test_bit(__IGBVF_RESETTING, &adapter->state))
1734 return;
1735
1736 if (pci_channel_offline(pdev))
1737 return;
1738
1739 UPDATE_VF_COUNTER(VFGPRC, gprc);
1740 UPDATE_VF_COUNTER(VFGORC, gorc);
1741 UPDATE_VF_COUNTER(VFGPTC, gptc);
1742 UPDATE_VF_COUNTER(VFGOTC, gotc);
1743 UPDATE_VF_COUNTER(VFMPRC, mprc);
1744 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1745 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1746 UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1747 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1748
1749 /* Fill out the OS statistics structure */
1750 adapter->net_stats.multicast = adapter->stats.mprc;
1751 }
1752
1753 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1754 {
1755 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1756 adapter->link_speed,
1757 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
1758 }
1759
1760 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1761 {
1762 struct e1000_hw *hw = &adapter->hw;
1763 s32 ret_val = E1000_SUCCESS;
1764 bool link_active;
1765
1766 /* If interface is down, stay link down */
1767 if (test_bit(__IGBVF_DOWN, &adapter->state))
1768 return false;
1769
1770 ret_val = hw->mac.ops.check_for_link(hw);
1771 link_active = !hw->mac.get_link_status;
1772
1773 /* if check for link returns error we will need to reset */
1774 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1775 schedule_work(&adapter->reset_task);
1776
1777 return link_active;
1778 }
1779
1780 /**
1781 * igbvf_watchdog - Timer Call-back
1782 * @data: pointer to adapter cast into an unsigned long
1783 **/
1784 static void igbvf_watchdog(unsigned long data)
1785 {
1786 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1787
1788 /* Do the rest outside of interrupt context */
1789 schedule_work(&adapter->watchdog_task);
1790 }
1791
1792 static void igbvf_watchdog_task(struct work_struct *work)
1793 {
1794 struct igbvf_adapter *adapter = container_of(work,
1795 struct igbvf_adapter,
1796 watchdog_task);
1797 struct net_device *netdev = adapter->netdev;
1798 struct e1000_mac_info *mac = &adapter->hw.mac;
1799 struct igbvf_ring *tx_ring = adapter->tx_ring;
1800 struct e1000_hw *hw = &adapter->hw;
1801 u32 link;
1802 int tx_pending = 0;
1803
1804 link = igbvf_has_link(adapter);
1805
1806 if (link) {
1807 if (!netif_carrier_ok(netdev)) {
1808 mac->ops.get_link_up_info(&adapter->hw,
1809 &adapter->link_speed,
1810 &adapter->link_duplex);
1811 igbvf_print_link_info(adapter);
1812
1813 netif_carrier_on(netdev);
1814 netif_wake_queue(netdev);
1815 }
1816 } else {
1817 if (netif_carrier_ok(netdev)) {
1818 adapter->link_speed = 0;
1819 adapter->link_duplex = 0;
1820 dev_info(&adapter->pdev->dev, "Link is Down\n");
1821 netif_carrier_off(netdev);
1822 netif_stop_queue(netdev);
1823 }
1824 }
1825
1826 if (netif_carrier_ok(netdev)) {
1827 igbvf_update_stats(adapter);
1828 } else {
1829 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1830 tx_ring->count);
1831 if (tx_pending) {
1832 /*
1833 * We've lost link, so the controller stops DMA,
1834 * but we've got queued Tx work that's never going
1835 * to get done, so reset controller to flush Tx.
1836 * (Do the reset outside of interrupt context).
1837 */
1838 adapter->tx_timeout_count++;
1839 schedule_work(&adapter->reset_task);
1840 }
1841 }
1842
1843 /* Cause software interrupt to ensure Rx ring is cleaned */
1844 ew32(EICS, adapter->rx_ring->eims_value);
1845
1846 /* Reset the timer */
1847 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1848 mod_timer(&adapter->watchdog_timer,
1849 round_jiffies(jiffies + (2 * HZ)));
1850 }
1851
1852 #define IGBVF_TX_FLAGS_CSUM 0x00000001
1853 #define IGBVF_TX_FLAGS_VLAN 0x00000002
1854 #define IGBVF_TX_FLAGS_TSO 0x00000004
1855 #define IGBVF_TX_FLAGS_IPV4 0x00000008
1856 #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1857 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1858
static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

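/**
 * igbvf_tx_csum - setup a checksum offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the context descriptor is placed
 * @skb: packet needing checksum insertion
 * @tx_flags: IGBVF_TX_FLAGS_* values describing VLAN tag and offloads
 *
 * Returns true if a context descriptor was queued, which happens when
 * either checksum offload was requested or a VLAN tag must be
 * inserted; returns false otherwise.
 **/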
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
				 struct igbvf_ring *tx_ring,
				 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
				 skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

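/**
 * igbvf_maybe_stop_tx - stop the Tx queue if descriptors are scarce
 * @netdev: network interface device structure
 * @size: number of descriptors the next transmit may need
 *
 * Returns 0 when at least @size descriptors are free (re-waking the
 * queue if it had been stopped), or -EBUSY after stopping the queue
 * so the stack retries once descriptors have been cleaned.
 **/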
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

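/**
 * igbvf_tx_map_adv - map skb head and page fragments for DMA
 * @adapter: board private structure
 * @tx_ring: ring on which the buffer mappings are tracked
 * @skb: packet whose linear data and fragments are mapped
 * @first: index of the first descriptor used for this packet
 *
 * Returns the number of buffers mapped, or 0 on a DMA mapping
 * failure, in which case every mapping made so far is unwound.
 **/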
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb,
				   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

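/**
 * igbvf_tx_queue_adv - write data descriptors and notify hardware
 * @adapter: board private structure
 * @tx_ring: ring on which the descriptors are written
 * @tx_flags: IGBVF_TX_FLAGS_* values describing offloads in use
 * @count: number of buffers mapped by igbvf_tx_map_adv()
 * @paylen: total packet length
 * @hdr_len: header length reported by igbvf_tso()
 *
 * Builds one advanced data descriptor per mapped buffer, marks the
 * last one end-of-packet, then writes the new tail so the hardware
 * fetches the descriptors.
 **/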
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

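/**
 * igbvf_xmit_frame_ring_adv - transmit a frame on a specific Tx ring
 * @skb: frame handed down by the stack
 * @netdev: network interface device structure
 * @tx_ring: ring on which the frame is queued
 *
 * Sets up any required context descriptor (TSO or checksum offload),
 * maps the frame for DMA, and queues the data descriptors.  On a
 * mapping failure the descriptor ring is rewound and the frame is
 * dropped.
 **/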
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * need: count + 4 descriptors:
	 *   + 2 desc gap to keep tail from touching head,
	 *   + 1 desc for skb->data,
	 *   + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	      igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

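/**
 * igbvf_xmit_frame - hard_start_xmit entry point
 * @skb: frame handed down by the stack
 * @netdev: network interface device structure
 *
 * The VF exposes a single Tx queue, so every frame is dispatched to
 * tx_ring[0] via igbvf_xmit_frame_ring_adv().
 **/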
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

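/**
 * igbvf_reset_task - deferred reset handler
 * @work: work item embedded in the adapter structure
 *
 * Runs from process context so the reset can sleep; scheduled by the
 * watchdog and by igbvf_tx_timeout().
 **/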
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;
	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

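/**
 * igbvf_suspend - PCI suspend callback (also used by shutdown)
 * @pdev: Pointer to PCI device
 * @state: power state being entered
 *
 * Detaches the interface, takes the adapter down if it was running,
 * saves PCI state when power management is enabled, and disables the
 * PCI device.
 **/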
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
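/**
 * igbvf_resume - PCI resume callback
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, re-enables the device, requests interrupts if
 * the interface was running, resets the VF, and reattaches the
 * interface.
 **/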
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

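/**
 * igbvf_shutdown - PCI shutdown callback
 * @pdev: Pointer to PCI device
 *
 * Reuses the suspend path to quiesce the device at shutdown or reboot.
 **/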
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

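/**
 * igbvf_print_device_info - log adapter type and MAC address
 * @adapter: board private structure
 **/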
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}

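/**
 * igbvf_set_features - update Rx checksum offload state
 * @netdev: network interface device structure
 * @features: new feature set requested by the stack
 *
 * Only NETIF_F_RXCSUM is acted on; the adapter flag mirrors whether
 * hardware Rx checksum results are honored.
 **/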
static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_get_stats		= igbvf_get_stats,
	.ndo_set_rx_mode	= igbvf_set_multi,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
};

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address."
			 " Is the PF interface up?\n");
		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
#endif
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */