/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "netvsc_api.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	unsigned long avail;
	struct work_struct work;
};

#define PACKET_PAGES_LOWATER  8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)
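
/*
 * Transmit flow control: 'avail' in struct net_device_context counts the
 * page buffers the driver may still hand to the host.  netvsc_start_xmit()
 * subtracts the pages used by each outgoing packet and stops the queue when
 * fewer than PACKET_PAGES_LOWATER remain; netvsc_xmit_completion() returns
 * them and wakes the queue once PACKET_PAGES_HIWATER are available again.
 */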

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

/* The one and only one */
static struct netvsc_driver g_netvsc_drv;

/*
 * No-op so the netdev core doesn't return -EINVAL when modifying the
 * multicast address list in SIOCADDMULTI. hv is set up to get all multicast
 * when it calls rndis_filter_open().
 */
static void netvsc_set_multicast_list(struct net_device *net)
{
}
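
/*
 * ndo_open/ndo_stop: these simply enable or disable the RNDIS filter on the
 * host side; the transmit queue is only started if the carrier is already up.
 */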
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret = 0;

	if (netif_carrier_ok(net)) {
		/* Open up the device */
		ret = rndis_filter_open(device_obj);
		if (ret != 0) {
			netdev_err(net, "unable to open device (ret %d).\n",
				   ret);
			return ret;
		}

		netif_start_queue(net);
	} else {
		netdev_err(net, "unable to open device...link is down.\n");
	}

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_stop_queue(net);

	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}
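
/*
 * Send-completion handler, run by the lower-level netvsc code once the host
 * has consumed a packet: free the hv_netvsc_packet and the originating skb,
 * then return the pages to the 'avail' counter and wake the queue if needed.
 */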
static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->completion.send.send_completion_tid;

	kfree(packet);

	if (skb) {
		struct net_device *net = skb->dev;
		struct net_device_context *net_device_ctx = netdev_priv(net);
		unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

		dev_kfree_skb_any(skb);

		net_device_ctx->avail += num_pages;
		if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
			netif_wake_queue(net);
	}
}
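
/*
 * Transmit path.  Page buffer 0 is left for the RNDIS message header (filled
 * in later by the RNDIS filter code), buffer 1 describes the linear skb data
 * and buffers 2..n map the skb fragments.
 */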
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_driver *drv =
		drv_to_hv_drv(net_device_ctx->device_ctx->device.driver);
	struct netvsc_driver *net_drv_obj = drv->priv;
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages;

	/* Add 1 for skb->data and an additional one for the RNDIS buffer */
	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
	if (num_pages > net_device_ctx->avail)
		return NETDEV_TX_BUSY;

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 net_drv_obj->req_ext_size, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, silently drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->extension = (void *)(unsigned long)packet +
			    sizeof(struct hv_netvsc_packet) +
			    (num_pages * sizeof(struct hv_page_buffer));

	/* Setup the rndis header */
	packet->page_buf_cnt = num_pages;

	/* TODO: Flush all write buffers/memory fence? */
	/* wmb(); */

	/* Initialize it from the skb */
	packet->total_data_buflen = skb->len;

	/* Start filling in the page buffers starting after the RNDIS buffer. */
	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->page_buf[1].offset =
		(unsigned long)skb->data & (PAGE_SIZE - 1);
	packet->page_buf[1].len = skb_headlen(skb);

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->page_buf[i+2].pfn = page_to_pfn(f->page);
		packet->page_buf[i+2].offset = f->page_offset;
		packet->page_buf[i+2].len = f->size;
	}

	/* Set the completion routine */
	packet->completion.send.send_completion = netvsc_xmit_completion;
	packet->completion.send.send_completion_ctx = packet;
	packet->completion.send.send_completion_tid = (unsigned long)skb;

	ret = net_drv_obj->send(net_device_ctx->device_ctx, packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;

		net_device_ctx->avail -= num_pages;
		if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
			netif_stop_queue(net);
	} else {
		/* we are shutting down or bus overloaded, just drop packet */
		net->stats.tx_dropped++;
		netvsc_xmit_completion(packet);
	}

	return NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
static void netvsc_linkstatus_callback(struct hv_device *device_obj,
				       unsigned int status)
{
	struct net_device *net = dev_get_drvdata(&device_obj->device);
	struct net_device_context *ndev_ctx;

	if (!net) {
		pr_err("got link status but net device not initialized yet\n");
		return;
	}

	if (status == 1) {
		netif_carrier_on(net);
		netif_wake_queue(net);
		netif_notify_peers(net);
		ndev_ctx = netdev_priv(net);
		schedule_work(&ndev_ctx->work);
	} else {
		netif_carrier_off(net);
		netif_stop_queue(net);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
static int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet)
{
	struct net_device *net = dev_get_drvdata(&device_obj->device);
	struct sk_buff *skb;
	void *data;
	int i;
	unsigned long flags;

	if (!net) {
		pr_err("got receive callback but net device not initialized yet\n");
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return 0;
	}

	/* for kmap_atomic */
	local_irq_save(flags);

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	for (i = 0; i < packet->page_buf_cnt; i++) {
		data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
				   KM_IRQ1);
		data = (void *)(unsigned long)data +
			packet->page_buf[i].offset;

		memcpy(skb_put(skb, packet->page_buf[i].len), data,
		       packet->page_buf[i].len);

		kunmap_atomic((void *)((unsigned long)data -
				       packet->page_buf[i].offset), KM_IRQ1);
	}

	local_irq_restore(flags);

	skb->protocol = eth_type_trans(skb, net);
	skb->ip_summed = CHECKSUM_NONE;

	net->stats.rx_packets++;
	net->stats.rx_bytes += skb->len;

	/*
	 * Pass the skb back up. The network stack will deallocate the skb
	 * when it is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "hv_netvsc");
	strcpy(info->version, HV_DRV_VERSION);
	strcpy(info->fw_version, "N/A");
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= ethtool_op_set_sg,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
	.ndo_open		= netvsc_open,
	.ndo_stop		= netvsc_close,
	.ndo_start_xmit		= netvsc_start_xmit,
	.ndo_set_multicast_list	= netvsc_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

/*
 * Send a GARP packet to network peers after migration.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving the RNDIS_STATUS_MEDIA_CONNECT event. So,
 * add another netif_notify_peers() into a scheduled work, otherwise the GARP
 * packet will not be sent after quick migration, and cause network
 * disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;

	msleep(20);
	ndev_ctx = container_of(w, struct net_device_context, work);
	net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
	netif_notify_peers(net);
}
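
/*
 * Probe is invoked by the vmbus bus driver for each synthetic NIC instance:
 * allocate the net_device, hand the new hv_device to the lower-level netvsc
 * code via dev_add(), and finally register the netdev with the stack.
 */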
static int netvsc_probe(struct device *device)
{
	struct hv_driver *drv =
		drv_to_hv_drv(device->driver);
	struct netvsc_driver *net_drv_obj = drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	int ret;

	if (!net_drv_obj->base.dev_add)
		return -1;

	net = alloc_etherdev(sizeof(struct net_device_context));
	if (!net)
		return -1;

	/* Set initial state */
	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = device_obj;
	net_device_ctx->avail = ring_size;
	dev_set_drvdata(device, net);
	INIT_WORK(&net_device_ctx->work, netvsc_send_garp);

	/* Notify the netvsc driver of the new device */
	ret = net_drv_obj->base.dev_add(device_obj, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		dev_set_drvdata(device, NULL);
		return ret;
	}

	/*
	 * If the carrier is still off, i.e. we did not get a link status
	 * callback, update it if necessary.
	 *
	 * FIXME: We should use an atomic or test/set instead to avoid getting
	 * out of sync with the device's link status.
	 */
	if (!netif_carrier_ok(net))
		if (!device_info.link_state)
			netif_carrier_on(net);

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	net->netdev_ops = &device_ops;

	/* TODO: Add GSO and Checksum offload */
	net->features = NETIF_F_SG;

	SET_ETHTOOL_OPS(net, &ethtool_ops);
	SET_NETDEV_DEV(net, device);

	ret = register_netdev(net);
	if (ret != 0) {
		/* Remove the device and release the resource */
		net_drv_obj->base.dev_rm(device_obj);
		free_netdev(net);
	}

	return ret;
}
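
/*
 * Teardown order on remove: stop the transmit queue, unregister the netdev,
 * then let the lower-level netvsc code release the device via dev_rm().
 */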
static int netvsc_remove(struct device *device)
{
	struct hv_driver *drv =
		drv_to_hv_drv(device->driver);
	struct netvsc_driver *net_drv_obj = drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);
	struct net_device *net = dev_get_drvdata(&device_obj->device);
	int ret;

	if (net == NULL) {
		dev_err(device, "No net device to remove\n");
		return 0;
	}

	if (!net_drv_obj->base.dev_rm)
		return -1;

	/* Stop outbound asap */
	netif_stop_queue(net);
	/* netif_carrier_off(net); */

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = net_drv_obj->base.dev_rm(device_obj);
	if (ret != 0) {
		/* TODO: */
		netdev_err(net, "unable to remove vsc device (ret %d)\n", ret);
	}

	free_netdev(net);
	return ret;
}
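
/* driver_for_each_device() callback: record the first device found and stop. */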
static int netvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	/* stop iterating */
	return 1;
}
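
/*
 * Module unload: unregister every device currently bound to this driver,
 * top-down, then clean up and unregister the driver from the vmbus bus.
 */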
static void netvsc_drv_exit(void)
{
	struct netvsc_driver *netvsc_drv_obj = &g_netvsc_drv;
	struct hv_driver *drv = &g_netvsc_drv.base;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv->driver, NULL,
					     &current_dev, netvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		dev_err(current_dev, "unregistering device (%s)...\n",
			dev_name(current_dev));

		device_unregister(current_dev);
	}

	if (netvsc_drv_obj->base.cleanup)
		netvsc_drv_obj->base.cleanup(&netvsc_drv_obj->base);

	vmbus_child_driver_unregister(&drv->driver);
}
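
/*
 * Register the netvsc driver with the vmbus child-driver infrastructure.
 * The drv_init callback (netvsc_initialize, passed in from netvsc_init())
 * is expected to complete the netvsc_driver/hv_driver initialization before
 * the driver is registered.
 */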
static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
	struct netvsc_driver *net_drv_obj = &g_netvsc_drv;
	struct hv_driver *drv = &g_netvsc_drv.base;
	int ret;

	net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
	net_drv_obj->recv_cb = netvsc_recv_callback;
	net_drv_obj->link_status_change = netvsc_linkstatus_callback;
	drv->priv = net_drv_obj;

	/* Callback to client driver to complete the initialization */
	drv_init(&net_drv_obj->base);

	drv->driver.name = net_drv_obj->base.name;

	drv->driver.probe = netvsc_probe;
	drv->driver.remove = netvsc_remove;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}
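
/*
 * The DMI table below lets netvsc_init() refuse to load on non-Hyper-V
 * systems; the PCI id table near the end of the file only provides a module
 * alias so the module can be loaded automatically on Hyper-V guests.
 */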
static const struct dmi_system_id __initconst
hv_netvsc_dmi_table[] __maybe_unused = {
	{
		.ident = "Hyper-V",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
			DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
		},
	},
	{ },
};
MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);

static int __init netvsc_init(void)
{
	pr_info("initializing....\n");

	if (!dmi_check_system(hv_netvsc_dmi_table))
		return -ENODEV;

	return netvsc_drv_init(netvsc_initialize);
}

static void __exit netvsc_exit(void)
{
	netvsc_drv_exit();
}

static const struct pci_device_id __initconst
hv_netvsc_pci_table[] __maybe_unused = {
	{ PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_init);
module_exit(netvsc_exit);