/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"


/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;


/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return netif_queue_stopped(adapter->netdev);
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_queue(adapter->netdev);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_queue(adapter->netdev);
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter)
{
	u32 ret;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		vmxnet3_tq_start(&adapter->tx_queue, adapter);
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		vmxnet3_tq_stop(&adapter->tx_queue, adapter);
	}
}


static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	u32 events = adapter->shared->ecr;
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (adapter->tqd_start->status.stopped) {
			printk(KERN_ERR "%s: tq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->tqd_start->status.error);
		}
		if (adapter->rqd_start->status.stopped) {
			printk(KERN_ERR "%s: rq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->rqd_start->status.error);
		}

		schedule_work(&adapter->work);
	}
}


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}

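/*
 * Unmap and free one transmitted packet. Its descriptors run from
 * tx_ring.next2comp up to and including eop_idx; each buf_info entry in
 * that span is unmapped, and the skb recorded at the EOP entry is freed.
 * Returns the number of tx descriptors reclaimed.
 */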

static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


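/*
 * Reclaim completed tx descriptors. Ownership of a completion descriptor
 * is tracked by its gen bit: the device writes new completions with the
 * ring's current generation, and the driver's copy of the generation is
 * toggled each time next2proc wraps around. A descriptor whose gen matches
 * comp_ring.gen is therefore new and can be processed.
 */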
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (gdesc->tcd.gen == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
					       adapter->pdev, adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
						* sizeof(struct Vmxnet3_TxDesc),
						&tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
						  tq->data_ring.size *
						  sizeof(struct Vmxnet3_TxDataDesc),
						  &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
						  tq->comp_ring.size *
						  sizeof(struct Vmxnet3_TxCompDesc),
						  &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}


/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

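/*
 * Two buffer types back the rx rings: VMXNET3_RX_BUF_SKB entries hold a
 * freshly allocated skb used for the head of a packet (BTYPE_HEAD), while
 * VMXNET3_RX_BUF_PAGE entries hold a full page used as a body fragment
 * (BTYPE_BODY). An entry that still has its skb/page attached was skipped
 * by the device and is simply reused without reallocation.
 */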
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = rbi->dma_addr;
		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
			       rbi->len;

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
				       skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}

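/*
 * Fill tx descriptors for a packet. The SOP descriptor is deliberately
 * written with the inverted gen bit so the device ignores the packet while
 * the remaining descriptors are being set up; vmxnet3_tq_xmit() later
 * flips the SOP gen bit (after a write barrier) to hand the whole chain
 * to the device at once.
 */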
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
					 tq->tx_ring.next2fill *
					 sizeof(struct Vmxnet3_TxDataDesc);
		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
			   VMXNET3_MAX_TX_BUF_SIZE : len;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);
		tbi->len = buf_size; /* this automatically converts 2^14 to 0 */

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = tbi->dma_addr;
		gdesc->dword[2] = dw2 | buf_size;
		gdesc->dword[3] = 0;

		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, gdesc->txd.addr,
			gdesc->dword[2], gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = tbi->dma_addr;
		gdesc->dword[2] = dw2 | frag->size;
		gdesc->dword[3] = 0;

		dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, gdesc->txd.addr,
			gdesc->dword[2], gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/*
 * parse and copy relevant protocol headers:
 *   For a tso pkt, relevant headers are L2/3/4 including options
 *   For a pkt requesting csum offloading, they are L2/3 and may include L4
 *   if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
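/*
 * Note: the copied headers land in the data ring slot that shares its
 * index with the packet's SOP descriptor (tq->tx_ring.next2fill), which
 * is how vmxnet3_map_pkt() later computes the DMA address of the copy.
 */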
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				    skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_transport_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}


/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* setup the SOP desc */
	gdesc = ctx.sop_txd;
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
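		/* deferred-completion count: one per TSO segment,
		 * i.e. DIV_ROUND_UP(payload, mss)
		 */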
		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
					      ctx.mss - 1) / ctx.mss;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		tq->shared->txNumDeferred++;
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

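	/* make sure all descriptor writes are visible to the device before
	 * the SOP gen bit below transfers ownership
	 */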
	wmb();

	/* finally flips the GEN bit of the SOP desc */
	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
	dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
		gdesc->dword[3]);

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
				       tq->tx_ring.next2fill);
	}
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;

	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
}

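/*
 * Set skb->ip_summed based on the rx completion descriptor. When the
 * device validated both the IP and TCP/UDP checksums (VMXNET3_RCD_CSUM_OK)
 * the stack can skip verification entirely; otherwise any partial checksum
 * the device computed is passed up for the stack to finish.
 */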
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
		    VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
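	/* producer registers for the two rx rings; the qid * 8 offset
	 * below selects the per-queue register instance
	 */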
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;

	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;

		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID == rq->qid ? 0 : 1;

		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			adapter->netdev->last_rx = jiffies;
			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
			rxd = &rq->rx_ring[ring_idx].base[i].rxd;

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kmalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	memset(bi, 0, sz);
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);

	vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
	return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}

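/*
 * NAPI poll callback. The interrupt handler masks the vector (in
 * VMXNET3_IMM_ACTIVE mode) and schedules NAPI; once rx processing
 * consumes less than the full budget, polling stops and the interrupt
 * is re-enabled.
 */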
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_adapter *adapter = container_of(napi,
					struct vmxnet3_adapter, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, 0);
	}
	return rxd_done;
}


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, 0);

	napi_schedule(&adapter->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER


/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int irq;

	if (adapter->intr.type == VMXNET3_IT_MSIX)
		irq = adapter->intr.msix_entries[0].vector;
	else
		irq = adapter->pdev->irq;

	disable_irq(irq);
	vmxnet3_intr(irq, netdev);
	enable_irq(irq);
}
#endif

static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	int err;

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		/* we only use 1 MSI-X vector */
		err = request_irq(adapter->intr.msix_entries[0].vector,
				  vmxnet3_intr, 0, adapter->netdev->name,
				  adapter->netdev);
	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
	}

	if (err)
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, adapter->intr.type, err);


	if (!err) {
		int i;
		/* init our intr settings */
		for (i = 0; i < adapter->intr.num_intrs; i++)
			adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;

		/* next setup intr index for all intr sources */
		adapter->tx_queue.comp_ring.intr_idx = 0;
		adapter->rx_queue.comp_ring.intr_idx = 0;
		adapter->intr.event_intr_idx = 0;

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, adapter->intr.type,
		       adapter->intr.mask_mode, adapter->intr.num_intrs);
	}

	return err;
}


static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
	       adapter->intr.num_intrs <= 0);

	switch (adapter->intr.type) {
	case VMXNET3_IT_MSIX:
	{
		int i;

		for (i = 0; i < adapter->intr.num_intrs; i++)
			free_irq(adapter->intr.msix_entries[i].vector,
				 adapter->netdev);
		break;
	}
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}


static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}


static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}


static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev->mc_count * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			int i;
			struct dev_mc_list *mc = netdev->mc_list;

			for (i = 0; i < netdev->mc_count; i++) {
				BUG_ON(!mc);
				memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
				       ETH_ALEN);
				mc = mc->next;
			}
		}
	}
	return buf;
}


static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (netdev->mc_count > 0) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = netdev->mc_count *
						     ETH_ALEN;
				rxConf->mfTablePA = virt_to_phys(new_table);
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = new_mode;
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);

	kfree(new_table);
}


/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.ddPA = virt_to_phys(adapter);
	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);

	/* set up feature flags */
	if (adapter->rxcsum)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->lro) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
			&& adapter->vlan_grp) {
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	}

	devRead->misc.mtu = adapter->netdev->mtu;
	devRead->misc.queueDescPA = adapter->queue_desc_pa;
	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc);

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
	tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
	tqc->txRingSize = adapter->tx_queue.tx_ring.size;
	tqc->dataRingSize = adapter->tx_queue.data_ring.size;
	tqc->compRingSize = adapter->tx_queue.comp_ring.size;
	tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
		     tqc->txRingSize;
	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
	rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
	rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
	rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
	rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
	rqc->compRingSize = adapter->rx_queue.comp_ring.size;
	rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
		     (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	/* the rest are already zeroed */
}

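/*
 * Activate the device: initialize the queues, set up driver_shared, hand
 * its physical address to the device via the DSAL/DSAH registers, then
 * issue ACTIVATE_DEV. A non-zero result read back from the command
 * register means the device rejected the configuration.
 */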
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);
	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}


void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}


int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;


	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}

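/*
 * Program the MAC address: the low four bytes go to the MACL register,
 * the remaining two bytes to MACH.
 */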
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}


/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}


static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

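/*
 * Pick rx buffer sizes for the current MTU. If a whole frame fits in a
 * single skb buffer, only ring0 head buffers are needed (one buffer per
 * packet); otherwise the head skb is sized to VMXNET3_MAX_SKB_BUF_SIZE
 * and the remainder spills into page-sized body buffers.
 */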
1891static void
1892vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
1893{
1894 size_t sz;
1895
1896 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
1897 VMXNET3_MAX_ETH_HDR_SIZE) {
1898 adapter->skb_buf_size = adapter->netdev->mtu +
1899 VMXNET3_MAX_ETH_HDR_SIZE;
1900 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
1901 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
1902
1903 adapter->rx_buf_per_pkt = 1;
1904 } else {
1905 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
1906 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
1907 VMXNET3_MAX_ETH_HDR_SIZE;
1908 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
1909 }
1910
1911 /*
1912 * for simplicity, force the ring0 size to be a multiple of
1913 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
1914 */
1915 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
1916 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
1917 sz - 1) / sz * sz;
1918 adapter->rx_queue.rx_ring[0].size = min_t(u32,
1919 adapter->rx_queue.rx_ring[0].size,
1920 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
1921}
1922
1923
1924int
1925vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
1926 u32 rx_ring_size, u32 rx_ring2_size)
1927{
1928 int err;
1929
1930 adapter->tx_queue.tx_ring.size = tx_ring_size;
1931 adapter->tx_queue.data_ring.size = tx_ring_size;
1932 adapter->tx_queue.comp_ring.size = tx_ring_size;
1933 adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
1934 adapter->tx_queue.stopped = true;
1935 err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
1936 if (err)
1937 return err;
1938
1939 adapter->rx_queue.rx_ring[0].size = rx_ring_size;
1940 adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
1941 vmxnet3_adjust_rx_ring_size(adapter);
1942 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
1943 adapter->rx_queue.rx_ring[1].size;
1944 adapter->rx_queue.qid = 0;
1945 adapter->rx_queue.qid2 = 1;
1946 adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
1947 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
1948 if (err)
1949 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1950
1951 return err;
1952}
1953
1954static int
1955vmxnet3_open(struct net_device *netdev)
1956{
1957 struct vmxnet3_adapter *adapter;
1958 int err;
1959
1960 adapter = netdev_priv(netdev);
1961
1962 spin_lock_init(&adapter->tx_queue.tx_lock);
1963
1964 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
1965 VMXNET3_DEF_RX_RING_SIZE,
1966 VMXNET3_DEF_RX_RING_SIZE);
1967 if (err)
1968 goto queue_err;
1969
1970 err = vmxnet3_activate_dev(adapter);
1971 if (err)
1972 goto activate_err;
1973
1974 return 0;
1975
1976activate_err:
1977 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
1978 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1979queue_err:
1980 return err;
1981}
1982
1983
1984static int
1985vmxnet3_close(struct net_device *netdev)
1986{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1988
1989 /*
1990 * Reset_work may be in the middle of resetting the device; wait for
1991 * it to complete.
1992 */
1993 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
1994 msleep(1);
1995
1996 vmxnet3_quiesce_dev(adapter);
1997
1998 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
1999 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2000
2001 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2002
2004 return 0;
2005}
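
/*
 * The test_and_set_bit()/clear_bit() pairs on
 * VMXNET3_STATE_BIT_RESETTING act as a crude sleeping lock:
 * vmxnet3_close(), vmxnet3_change_mtu() and vmxnet3_reset_work() each
 * claim the bit before touching the device, so a close that races with
 * the reset worker simply naps in 1 ms steps until the reset finishes.
 */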
2006
2007
2008void
2009vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2010{
2011 /*
2012 * VMXNET3_STATE_BIT_RESETTING must already be clear here; otherwise
2013 * the dev_close() below would deadlock in vmxnet3_close().
2014 */
2015 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2016
2017 /* we need to enable NAPI, otherwise dev_close will deadlock */
2018 napi_enable(&adapter->napi);
2019 dev_close(adapter->netdev);
2020}
2021
2022
2023static int
2024vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2025{
2026 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2027 int err = 0;
2028
2029 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2030 return -EINVAL;
2031
2032 if (new_mtu > 1500 && !adapter->jumbo_frame)
2033 return -EINVAL;
2034
2035 netdev->mtu = new_mtu;
2036
2037 /*
2038 * Reset_work may be in the middle of resetting the device; wait for
2039 * it to complete.
2040 */
2041 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2042 msleep(1);
2043
2044 if (netif_running(netdev)) {
2045 vmxnet3_quiesce_dev(adapter);
2046 vmxnet3_reset_dev(adapter);
2047
2048 /* we need to re-create the rx queue based on the new mtu */
2049 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2050 vmxnet3_adjust_rx_ring_size(adapter);
2051 adapter->rx_queue.comp_ring.size =
2052 adapter->rx_queue.rx_ring[0].size +
2053 adapter->rx_queue.rx_ring[1].size;
2054 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2055 if (err) {
2056 printk(KERN_ERR "%s: failed to re-create rx queue,"
2057 " error %d. Closing it.\n", netdev->name, err);
2058 goto out;
2059 }
2060
2061 err = vmxnet3_activate_dev(adapter);
2062 if (err) {
2063 printk(KERN_ERR "%s: failed to re-activate, error %d. "
2064 "Closing it.\n", netdev->name, err);
2065 goto out;
2066 }
2067 }
2068
2069out:
2070 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2071 if (err)
2072 vmxnet3_force_close(adapter);
2073
2074 return err;
2075}
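
/*
 * If the rx queue cannot be re-created, or the device cannot be
 * re-activated, after an MTU change, the adapter is left half torn
 * down; the error path above therefore closes the interface outright
 * rather than leaving it up without usable rx buffers.
 */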
2076
2077
2078static void
2079vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2080{
2081 struct net_device *netdev = adapter->netdev;
2082
2083 netdev->features = NETIF_F_SG |
2084 NETIF_F_HW_CSUM |
2085 NETIF_F_HW_VLAN_TX |
2086 NETIF_F_HW_VLAN_RX |
2087 NETIF_F_HW_VLAN_FILTER |
2088 NETIF_F_TSO |
2089 NETIF_F_TSO6 |
2090 NETIF_F_LRO;
2091
2092 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2093
2094 adapter->rxcsum = true;
2095 adapter->jumbo_frame = true;
2096 adapter->lro = true;
2097
2098 if (dma64) {
2099 netdev->features |= NETIF_F_HIGHDMA;
2100 printk(" highDMA");
2101 }
2102
2103 netdev->vlan_features = netdev->features;
2104 printk("\n");
2105}
2106
2107
2108static void
2109vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2110{
2111 u32 tmp;
2112
2113 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2114 *(u32 *)mac = tmp;
2115
2116 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2117 mac[4] = tmp & 0xff;
2118 mac[5] = (tmp >> 8) & 0xff;
2119}
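
/*
 * Illustration of the register layout read above (example values are
 * made up): with MACL == 0x33221100 and MACH == 0x00005544, the u32
 * store yields mac[0..3] == 00:11:22:33 and the byte extraction yields
 * mac[4] == 0x44 and mac[5] == 0x55, i.e. 00:11:22:33:44:55.  The
 * *(u32 *)mac store assumes a little-endian host, which holds on the
 * x86 machines this virtual NIC targets.
 */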
2120
2121
2122static void
2123vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2124{
2125 u32 cfg;
2126
2127 /* intr settings */
2128 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2129 VMXNET3_CMD_GET_CONF_INTR);
2130 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2131 adapter->intr.type = cfg & 0x3;
2132 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2133
2134 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2135 int err;
2136
2137 adapter->intr.msix_entries[0].entry = 0;
2138 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2139 VMXNET3_LINUX_MAX_MSIX_VECT);
2140 if (!err) {
2141 adapter->intr.num_intrs = 1;
2142 adapter->intr.type = VMXNET3_IT_MSIX;
2143 return;
2144 }
2145
2146 err = pci_enable_msi(adapter->pdev);
2147 if (!err) {
2148 adapter->intr.num_intrs = 1;
2149 adapter->intr.type = VMXNET3_IT_MSI;
2150 return;
2151 }
2152 }
2153
2154 adapter->intr.type = VMXNET3_IT_INTX;
2155
2156 /* INTx related settings */
2157 adapter->intr.num_intrs = 1;
2158}
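
/*
 * Decoding example for GET_CONF_INTR above (the value is made up):
 * cfg == 0x5 yields intr.type == (0x5 & 0x3) == 1 and
 * intr.mask_mode == ((0x5 >> 2) & 0x3) == 1.  When the device reports
 * VMXNET3_IT_AUTO, the driver tries MSI-X first, then MSI, and finally
 * falls back to legacy INTx.
 */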
2159
2160
2161static void
2162vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2163{
2164 if (adapter->intr.type == VMXNET3_IT_MSIX)
2165 pci_disable_msix(adapter->pdev);
2166 else if (adapter->intr.type == VMXNET3_IT_MSI)
2167 pci_disable_msi(adapter->pdev);
2168 else
2169 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2170}
2171
2172
2173static void
2174vmxnet3_tx_timeout(struct net_device *netdev)
2175{
2176 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2177 adapter->tx_timeout_count++;
2178
2179 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2180 schedule_work(&adapter->work);
2181}
2182
2183
2184static void
2185vmxnet3_reset_work(struct work_struct *data)
2186{
2187 struct vmxnet3_adapter *adapter;
2188
2189 adapter = container_of(data, struct vmxnet3_adapter, work);
2190
2191 /* if another thread is resetting the device, no need to proceed */
2192 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2193 return;
2194
2195 /* if the device is closed, we must leave it alone */
2196 if (netif_running(adapter->netdev)) {
2197 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2198 vmxnet3_quiesce_dev(adapter);
2199 vmxnet3_reset_dev(adapter);
2200 vmxnet3_activate_dev(adapter);
2201 } else {
2202 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2203 }
2204
2205 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2206}
2207
2208
2209static int __devinit
2210vmxnet3_probe_device(struct pci_dev *pdev,
2211 const struct pci_device_id *id)
2212{
2213 static const struct net_device_ops vmxnet3_netdev_ops = {
2214 .ndo_open = vmxnet3_open,
2215 .ndo_stop = vmxnet3_close,
2216 .ndo_start_xmit = vmxnet3_xmit_frame,
2217 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2218 .ndo_change_mtu = vmxnet3_change_mtu,
2219 .ndo_get_stats = vmxnet3_get_stats,
2220 .ndo_tx_timeout = vmxnet3_tx_timeout,
2221 .ndo_set_multicast_list = vmxnet3_set_mc,
2222 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
2223 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2224 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2225#ifdef CONFIG_NET_POLL_CONTROLLER
2226 .ndo_poll_controller = vmxnet3_netpoll,
2227#endif
2228 };
2229 int err;
2230 bool dma64 = false; /* silence a spurious gcc "maybe uninitialized" warning */
2231 u32 ver;
2232 struct net_device *netdev;
2233 struct vmxnet3_adapter *adapter;
2234 u8 mac[ETH_ALEN];
2235
2236 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2237 if (!netdev) {
2238 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2239 "%s\n", pci_name(pdev));
2240 return -ENOMEM;
2241 }
2242
2243 pci_set_drvdata(pdev, netdev);
2244 adapter = netdev_priv(netdev);
2245 adapter->netdev = netdev;
2246 adapter->pdev = pdev;
2247
2248 adapter->shared = pci_alloc_consistent(adapter->pdev,
2249 sizeof(struct Vmxnet3_DriverShared),
2250 &adapter->shared_pa);
2251 if (!adapter->shared) {
2252 printk(KERN_ERR "Failed to allocate memory for %s\n",
2253 pci_name(pdev));
2254 err = -ENOMEM;
2255 goto err_alloc_shared;
2256 }
2257
2258 adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
2259 sizeof(struct Vmxnet3_TxQueueDesc) +
2260 sizeof(struct Vmxnet3_RxQueueDesc),
2261 &adapter->queue_desc_pa);
2262
2263 if (!adapter->tqd_start) {
2264 printk(KERN_ERR "Failed to allocate memory for %s\n",
2265 pci_name(pdev));
2266 err = -ENOMEM;
2267 goto err_alloc_queue_desc;
2268 }
2269 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
2270 + 1);
2271
2272 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2273 if (adapter->pm_conf == NULL) {
2274 printk(KERN_ERR "Failed to allocate memory for %s\n",
2275 pci_name(pdev));
2276 err = -ENOMEM;
2277 goto err_alloc_pm;
2278 }
2279
2280 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2281 if (err < 0)
2282 goto err_alloc_pci;
2283
2284 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2285 if (ver & 1) {
2286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2287 } else {
2288 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2289 " %s\n", ver, pci_name(pdev));
2290 err = -EBUSY;
2291 goto err_ver;
2292 }
2293
2294 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2295 if (ver & 1) {
2296 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2297 } else {
2298 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2299 "adapter %s\n", ver, pci_name(pdev));
2300 err = -EBUSY;
2301 goto err_ver;
2302 }
2303
2304 vmxnet3_declare_features(adapter, dma64);
2305
2306 adapter->dev_number = atomic_read(&devices_found);
2307 vmxnet3_alloc_intr_resources(adapter);
2308
2309 vmxnet3_read_mac_addr(adapter, mac);
2310 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2311
2312 netdev->netdev_ops = &vmxnet3_netdev_ops;
2313 netdev->watchdog_timeo = 5 * HZ;
2314 vmxnet3_set_ethtool_ops(netdev);
2315
2316 INIT_WORK(&adapter->work, vmxnet3_reset_work);
2317
2318 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
2319 SET_NETDEV_DEV(netdev, &pdev->dev);
2320 err = register_netdev(netdev);
2321
2322 if (err) {
2323 printk(KERN_ERR "Failed to register adapter %s\n",
2324 pci_name(pdev));
2325 goto err_register;
2326 }
2327
2328 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2329 atomic_inc(&devices_found);
2330 return 0;
2331
2332err_register:
2333 vmxnet3_free_intr_resources(adapter);
2334err_ver:
2335 vmxnet3_free_pci_resources(adapter);
2336err_alloc_pci:
2337 kfree(adapter->pm_conf);
2338err_alloc_pm:
2339 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2340 sizeof(struct Vmxnet3_RxQueueDesc),
2341 adapter->tqd_start, adapter->queue_desc_pa);
2342err_alloc_queue_desc:
2343 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2344 adapter->shared, adapter->shared_pa);
2345err_alloc_shared:
2346 pci_set_drvdata(pdev, NULL);
2347 free_netdev(netdev);
2348 return err;
2349}
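
/*
 * The error labels above unwind strictly in reverse order of
 * acquisition, so each goto releases exactly the resources that were
 * successfully obtained before the failing step.
 */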
2350
2351
2352static void __devexit
2353vmxnet3_remove_device(struct pci_dev *pdev)
2354{
2355 struct net_device *netdev = pci_get_drvdata(pdev);
2356 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2357
2358 flush_scheduled_work();
2359
2360 unregister_netdev(netdev);
2361
2362 vmxnet3_free_intr_resources(adapter);
2363 vmxnet3_free_pci_resources(adapter);
2364 kfree(adapter->pm_conf);
2365 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2366 sizeof(struct Vmxnet3_RxQueueDesc),
2367 adapter->tqd_start, adapter->queue_desc_pa);
2368 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2369 adapter->shared, adapter->shared_pa);
2370 free_netdev(netdev);
2371}
2372
2373
2374#ifdef CONFIG_PM
2375
2376static int
2377vmxnet3_suspend(struct device *device)
2378{
2379 struct pci_dev *pdev = to_pci_dev(device);
2380 struct net_device *netdev = pci_get_drvdata(pdev);
2381 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2382 struct Vmxnet3_PMConf *pmConf;
2383 struct ethhdr *ehdr;
2384 struct arphdr *ahdr;
2385 u8 *arpreq;
2386 struct in_device *in_dev;
2387 struct in_ifaddr *ifa;
2388 int i = 0;
2389
2390 if (!netif_running(netdev))
2391 return 0;
2392
2393 vmxnet3_disable_all_intrs(adapter);
2394 vmxnet3_free_irqs(adapter);
2395 vmxnet3_free_intr_resources(adapter);
2396
2397 netif_device_detach(netdev);
2398 netif_stop_queue(netdev);
2399
2400 /* Create wake-up filters. */
2401 pmConf = adapter->pm_conf;
2402 memset(pmConf, 0, sizeof(*pmConf));
2403
2404 if (adapter->wol & WAKE_UCAST) {
2405 pmConf->filters[i].patternSize = ETH_ALEN;
2406 pmConf->filters[i].maskSize = 1;
2407 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2408 pmConf->filters[i].mask[0] = 0x3F; /* match the low ETH_ALEN (6) pattern bytes */
2409
2410 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2411 i++;
2412 }
2413
2414 if (adapter->wol & WAKE_ARP) {
2415 in_dev = in_dev_get(netdev);
2416 if (!in_dev)
2417 goto skip_arp;
2418
2419 ifa = (struct in_ifaddr *)in_dev->ifa_list;
2420 if (!ifa)
2421 goto skip_arp;
2422
2423 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
2424 sizeof(struct arphdr) + /* ARP header */
2425 2 * ETH_ALEN + /* 2 Ethernet addresses */
2426 2 * sizeof(u32); /* 2 IPv4 addresses */
2427 pmConf->filters[i].maskSize =
2428 (pmConf->filters[i].patternSize - 1) / 8 + 1;
2429
2430 /* ETH_P_ARP in Ethernet header. */
2431 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
2432 ehdr->h_proto = htons(ETH_P_ARP);
2433
2434 /* ARPOP_REQUEST in ARP header. */
2435 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
2436 ahdr->ar_op = htons(ARPOP_REQUEST);
2437 arpreq = (u8 *)(ahdr + 1);
2438
2439 /* The unicast IPv4 address goes in the 'tip' field. */
2440 arpreq += 2 * ETH_ALEN + sizeof(u32);
2441 *(u32 *)arpreq = ifa->ifa_address;
2442
2443 /* The mask for the relevant bits. */
2444 pmConf->filters[i].mask[0] = 0x00;
2445 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
2446 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
2447 pmConf->filters[i].mask[3] = 0x00;
2448 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
2449 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2450 in_dev_put(in_dev);
2451
2452 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2453 i++;
2454 }
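
	/*
	 * Mask layout sketch, inferred from the maskSize computation
	 * above (one mask bit per pattern byte): mask[1] == 0x30
	 * selects pattern bytes 12-13 (the EtherType), mask[2] == 0x30
	 * selects bytes 20-21 (the ARP opcode), and mask[4] == 0xC0
	 * with mask[5] == 0x03 select bytes 38-41, the target IP
	 * written into the pattern above.
	 */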
2455
2456skip_arp:
2457 if (adapter->wol & WAKE_MAGIC)
2458 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
2459
2460 pmConf->numFilters = i;
2461
2462 adapter->shared->devRead.pmConfDesc.confVer = 1;
2463 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2464 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2465
2466 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2467 VMXNET3_CMD_UPDATE_PMCFG);
2468
2469 pci_save_state(pdev);
2470 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
2471 adapter->wol);
2472 pci_disable_device(pdev);
2473 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
2474
2475 return 0;
2476}
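
/*
 * The wake-up configuration is handed to the device through the
 * shared-memory pmConfDesc (version, length and physical address) and
 * latched by the UPDATE_PMCFG command, before the PCI core is asked to
 * arm wake-up and move the device to its suspend power state.
 */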
2477
2478
2479static int
2480vmxnet3_resume(struct device *device)
2481{
2482 int err;
2483 struct pci_dev *pdev = to_pci_dev(device);
2484 struct net_device *netdev = pci_get_drvdata(pdev);
2485 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2486 struct Vmxnet3_PMConf *pmConf;
2487
2488 if (!netif_running(netdev))
2489 return 0;
2490
2491 /* Destroy wake-up filters. */
2492 pmConf = adapter->pm_conf;
2493 memset(pmConf, 0, sizeof(*pmConf));
2494
2495 adapter->shared->devRead.pmConfDesc.confVer = 1;
2496 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2497 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2498
2499 netif_device_attach(netdev);
2500 pci_set_power_state(pdev, PCI_D0);
2501 pci_restore_state(pdev);
2502 err = pci_enable_device_mem(pdev);
2503 if (err != 0)
2504 return err;
2505
2506 pci_enable_wake(pdev, PCI_D0, 0);
2507
2508 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2509 VMXNET3_CMD_UPDATE_PMCFG);
2510 vmxnet3_alloc_intr_resources(adapter);
2511 vmxnet3_request_irqs(adapter);
2512 vmxnet3_enable_all_intrs(adapter);
2513
2514 return 0;
2515}
2516
2517static const struct dev_pm_ops vmxnet3_pm_ops = {
2518 .suspend = vmxnet3_suspend,
2519 .resume = vmxnet3_resume,
2520};
2521#endif
2522
2523static struct pci_driver vmxnet3_driver = {
2524 .name = vmxnet3_driver_name,
2525 .id_table = vmxnet3_pciid_table,
2526 .probe = vmxnet3_probe_device,
2527 .remove = __devexit_p(vmxnet3_remove_device),
2528#ifdef CONFIG_PM
2529 .driver.pm = &vmxnet3_pm_ops,
2530#endif
2531};
2532
2533
2534static int __init
2535vmxnet3_init_module(void)
2536{
2537 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
2538 VMXNET3_DRIVER_VERSION_REPORT);
2539 return pci_register_driver(&vmxnet3_driver);
2540}
2541
2542module_init(vmxnet3_init_module);
2543
2544
2545static void
2546vmxnet3_exit_module(void)
2547{
2548 pci_unregister_driver(&vmxnet3_driver);
2549}
2550
2551module_exit(vmxnet3_exit_module);
2552
2553MODULE_AUTHOR("VMware, Inc.");
2554MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
2555MODULE_LICENSE("GPL v2");
2556MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);