/*
 * Source provenance: drivers/staging/hv/netvsc_drv.c from
 * mirror_ubuntu-bionic-kernel.git (git.proxmox.com mirror), blob as of
 * commit "Staging: hv: Move the contents of vmbus.h to hyperv.h".
 */
1 /*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/init.h>
24 #include <linux/module.h>
25 #include <linux/highmem.h>
26 #include <linux/device.h>
27 #include <linux/io.h>
28 #include <linux/delay.h>
29 #include <linux/netdevice.h>
30 #include <linux/inetdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/in.h>
34 #include <linux/slab.h>
35 #include <linux/dmi.h>
36 #include <linux/pci.h>
37 #include <net/arp.h>
38 #include <net/route.h>
39 #include <net/sock.h>
40 #include <net/pkt_sched.h>
41
42 #include "hyperv.h"
43 #include "hv_api.h"
44 #include "netvsc_api.h"
45
/* Per-netdev private state, stored in netdev_priv(). */
struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	/* pages still available for outgoing packets; used with the
	 * PACKET_PAGES_* watermarks for transmit flow control */
	unsigned long avail;
	/* deferred work that sends a GARP after migration (netvsc_send_garp) */
	struct work_struct work;
};
52
53
/* Transmit flow-control watermarks, in pages drawn from ctx->avail:
 * stop the queue below LOWATER, wake it at or above HIWATER. */
#define PACKET_PAGES_LOWATER  8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)

/* Ring buffer size in pages; settable at load time, read-only afterwards */
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
61
/*
 * No-op so the netdev core doesn't return -EINVAL when modifying the
 * multicast address list in SIOCADDMULTI.  hv is set up to get all
 * multicast when it calls RndisFilterOnOpen().
 */
static void netvsc_set_multicast_list(struct net_device *net)
{
}
68
69 static int netvsc_open(struct net_device *net)
70 {
71 struct net_device_context *net_device_ctx = netdev_priv(net);
72 struct hv_device *device_obj = net_device_ctx->device_ctx;
73 int ret = 0;
74
75 if (netif_carrier_ok(net)) {
76 /* Open up the device */
77 ret = rndis_filter_open(device_obj);
78 if (ret != 0) {
79 netdev_err(net, "unable to open device (ret %d).\n",
80 ret);
81 return ret;
82 }
83
84 netif_start_queue(net);
85 } else {
86 netdev_err(net, "unable to open device...link is down.\n");
87 }
88
89 return ret;
90 }
91
92 static int netvsc_close(struct net_device *net)
93 {
94 struct net_device_context *net_device_ctx = netdev_priv(net);
95 struct hv_device *device_obj = net_device_ctx->device_ctx;
96 int ret;
97
98 netif_stop_queue(net);
99
100 ret = rndis_filter_close(device_obj);
101 if (ret != 0)
102 netdev_err(net, "unable to close device (ret %d).\n", ret);
103
104 return ret;
105 }
106
107 static void netvsc_xmit_completion(void *context)
108 {
109 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
110 struct sk_buff *skb = (struct sk_buff *)
111 (unsigned long)packet->completion.send.send_completion_tid;
112
113 kfree(packet);
114
115 if (skb) {
116 struct net_device *net = skb->dev;
117 struct net_device_context *net_device_ctx = netdev_priv(net);
118 unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
119
120 dev_kfree_skb_any(skb);
121
122 net_device_ctx->avail += num_pages;
123 if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
124 netif_wake_queue(net);
125 }
126 }
127
/*
 * ndo_start_xmit handler: wrap the skb in a hv_netvsc_packet and hand it
 * to the netvsc driver's send() hook.  Returns NETDEV_TX_BUSY only when
 * the page budget (ctx->avail) is exhausted; an allocation failure drops
 * the packet and still reports NETDEV_TX_OK.
 */
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_driver *net_drv_obj =
		drv_to_netvscdrv(net_device_ctx->device_ctx->device.driver);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages;

	/* Add 1 for skb->data and additional one for RNDIS */
	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
	if (num_pages > net_device_ctx->avail)
		return NETDEV_TX_BUSY;

	/* Allocate a netvsc packet based on # of frags.
	 * Layout: hv_netvsc_packet | page_buf[num_pages] | RNDIS extension */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 net_drv_obj->req_ext_size, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, silently drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* extension area lives right after the page-buffer array */
	packet->extension = (void *)(unsigned long)packet +
			sizeof(struct hv_netvsc_packet) +
			    (num_pages * sizeof(struct hv_page_buffer));

	/* Setup the rndis header */
	packet->page_buf_cnt = num_pages;

	/* TODO: Flush all write buffers/ memory fence ??? */
	/* wmb(); */

	/* Initialize it from the skb */
	packet->total_data_buflen = skb->len;

	/* Start filling in the page buffers starting after RNDIS buffer.
	 * page_buf[0] stays reserved for the RNDIS header itself. */
	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->page_buf[1].offset
		= (unsigned long)skb->data & (PAGE_SIZE - 1);
	packet->page_buf[1].len = skb_headlen(skb);

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->page_buf[i+2].pfn = page_to_pfn(f->page);
		packet->page_buf[i+2].offset = f->page_offset;
		packet->page_buf[i+2].len = f->size;
	}

	/* Set the completion routine; tid carries the skb pointer so the
	 * completion can free it */
	packet->completion.send.send_completion = netvsc_xmit_completion;
	packet->completion.send.send_completion_ctx = packet;
	packet->completion.send.send_completion_tid = (unsigned long)skb;

	ret = net_drv_obj->send(net_device_ctx->device_ctx,
				packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;

		net_device_ctx->avail -= num_pages;
		if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
			netif_stop_queue(net);
	} else {
		/* we are shutting down or bus overloaded, just drop packet */
		net->stats.tx_dropped++;
		/* the completion routine frees both packet and skb */
		netvsc_xmit_completion(packet);
	}

	return NETDEV_TX_OK;
}
205
206 /*
207 * netvsc_linkstatus_callback - Link up/down notification
208 */
209 static void netvsc_linkstatus_callback(struct hv_device *device_obj,
210 unsigned int status)
211 {
212 struct net_device *net = dev_get_drvdata(&device_obj->device);
213 struct net_device_context *ndev_ctx;
214
215 if (!net) {
216 netdev_err(net, "got link status but net device "
217 "not initialized yet\n");
218 return;
219 }
220
221 if (status == 1) {
222 netif_carrier_on(net);
223 netif_wake_queue(net);
224 netif_notify_peers(net);
225 ndev_ctx = netdev_priv(net);
226 schedule_work(&ndev_ctx->work);
227 } else {
228 netif_carrier_off(net);
229 netif_stop_queue(net);
230 }
231 }
232
233 /*
234 * netvsc_recv_callback - Callback when we receive a packet from the
235 * "wire" on the specified device.
236 */
237 static int netvsc_recv_callback(struct hv_device *device_obj,
238 struct hv_netvsc_packet *packet)
239 {
240 struct net_device *net = dev_get_drvdata(&device_obj->device);
241 struct sk_buff *skb;
242 void *data;
243 int i;
244 unsigned long flags;
245
246 if (!net) {
247 netdev_err(net, "got receive callback but net device"
248 " not initialized yet\n");
249 return 0;
250 }
251
252 /* Allocate a skb - TODO direct I/O to pages? */
253 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
254 if (unlikely(!skb)) {
255 ++net->stats.rx_dropped;
256 return 0;
257 }
258
259 /* for kmap_atomic */
260 local_irq_save(flags);
261
262 /*
263 * Copy to skb. This copy is needed here since the memory pointed by
264 * hv_netvsc_packet cannot be deallocated
265 */
266 for (i = 0; i < packet->page_buf_cnt; i++) {
267 data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
268 KM_IRQ1);
269 data = (void *)(unsigned long)data +
270 packet->page_buf[i].offset;
271
272 memcpy(skb_put(skb, packet->page_buf[i].len), data,
273 packet->page_buf[i].len);
274
275 kunmap_atomic((void *)((unsigned long)data -
276 packet->page_buf[i].offset), KM_IRQ1);
277 }
278
279 local_irq_restore(flags);
280
281 skb->protocol = eth_type_trans(skb, net);
282 skb->ip_summed = CHECKSUM_NONE;
283
284 net->stats.rx_packets++;
285 net->stats.rx_bytes += skb->len;
286
287 /*
288 * Pass the skb back up. Network stack will deallocate the skb when it
289 * is done.
290 * TODO - use NAPI?
291 */
292 netif_rx(skb);
293
294 return 0;
295 }
296
297 static void netvsc_get_drvinfo(struct net_device *net,
298 struct ethtool_drvinfo *info)
299 {
300 strcpy(info->driver, "hv_netvsc");
301 strcpy(info->version, HV_DRV_VERSION);
302 strcpy(info->fw_version, "N/A");
303 }
304
/* ethtool support: driver identification and standard link detection */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = netvsc_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
309
/* net_device callbacks; address/MTU handling uses the generic ethernet
 * helpers since the synthetic NIC behaves like ordinary ethernet */
static const struct net_device_ops device_ops = {
	.ndo_open = netvsc_open,
	.ndo_stop = netvsc_close,
	.ndo_start_xmit = netvsc_start_xmit,
	.ndo_set_multicast_list = netvsc_set_multicast_list,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
319
320 /*
321 * Send GARP packet to network peers after migrations.
322 * After Quick Migration, the network is not immediately operational in the
323 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
324 * another netif_notify_peers() into a scheduled work, otherwise GARP packet
325 * will not be sent after quick migration, and cause network disconnection.
326 */
327 static void netvsc_send_garp(struct work_struct *w)
328 {
329 struct net_device_context *ndev_ctx;
330 struct net_device *net;
331
332 msleep(20);
333 ndev_ctx = container_of(w, struct net_device_context, work);
334 net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
335 netif_notify_peers(net);
336 }
337
338
339 static int netvsc_probe(struct hv_device *dev)
340 {
341 struct net_device *net = NULL;
342 struct net_device_context *net_device_ctx;
343 struct netvsc_device_info device_info;
344 int ret;
345
346 net = alloc_etherdev(sizeof(struct net_device_context));
347 if (!net)
348 return -1;
349
350 /* Set initial state */
351 netif_carrier_off(net);
352
353 net_device_ctx = netdev_priv(net);
354 net_device_ctx->device_ctx = dev;
355 net_device_ctx->avail = ring_size;
356 dev_set_drvdata(&dev->device, net);
357 INIT_WORK(&net_device_ctx->work, netvsc_send_garp);
358
359 /* Notify the netvsc driver of the new device */
360 ret = rndis_filte_device_add(dev, &device_info);
361 if (ret != 0) {
362 free_netdev(net);
363 dev_set_drvdata(&dev->device, NULL);
364
365 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
366 return ret;
367 }
368
369 /*
370 * If carrier is still off ie we did not get a link status callback,
371 * update it if necessary
372 */
373 /*
374 * FIXME: We should use a atomic or test/set instead to avoid getting
375 * out of sync with the device's link status
376 */
377 if (!netif_carrier_ok(net))
378 if (!device_info.link_state)
379 netif_carrier_on(net);
380
381 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
382
383 net->netdev_ops = &device_ops;
384
385 /* TODO: Add GSO and Checksum offload */
386 net->hw_features = NETIF_F_SG;
387 net->features = NETIF_F_SG;
388
389 SET_ETHTOOL_OPS(net, &ethtool_ops);
390 SET_NETDEV_DEV(net, &dev->device);
391
392 ret = register_netdev(net);
393 if (ret != 0) {
394 /* Remove the device and release the resource */
395 rndis_filter_device_remove(dev);
396 free_netdev(net);
397 }
398
399 return ret;
400 }
401
402 static int netvsc_remove(struct hv_device *dev)
403 {
404 struct net_device *net = dev_get_drvdata(&dev->device);
405 int ret;
406
407 if (net == NULL) {
408 dev_err(&dev->device, "No net device to remove\n");
409 return 0;
410 }
411
412 /* Stop outbound asap */
413 netif_stop_queue(net);
414 /* netif_carrier_off(net); */
415
416 unregister_netdev(net);
417
418 /*
419 * Call to the vsc driver to let it know that the device is being
420 * removed
421 */
422 ret = rndis_filter_device_remove(dev);
423 if (ret != 0) {
424 /* TODO: */
425 netdev_err(net, "unable to remove vsc device (ret %d)\n", ret);
426 }
427
428 free_netdev(net);
429 return ret;
430 }
431
/*
 * driver_for_each_device() callback used by netvsc_drv_exit(): record
 * the first device encountered in *data and stop the iteration.
 */
static int netvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = data;

	*curr = dev;
	return 1;	/* stop iterating */
}
440
/* The one and only one: the singleton netvsc driver instance.  The
 * remaining callbacks are filled in by netvsc_drv_init(). */
static struct netvsc_driver netvsc_drv = {
	.base.probe = netvsc_probe,
	.base.remove = netvsc_remove,
};
446
447 static void netvsc_drv_exit(void)
448 {
449 struct hv_driver *drv = &netvsc_drv.base;
450 struct device *current_dev;
451 int ret;
452
453 while (1) {
454 current_dev = NULL;
455
456 /* Get the device */
457 ret = driver_for_each_device(&drv->driver, NULL,
458 &current_dev, netvsc_drv_exit_cb);
459
460 if (current_dev == NULL)
461 break;
462
463 /* Initiate removal from the top-down */
464 dev_err(current_dev, "unregistering device (%s)...\n",
465 dev_name(current_dev));
466
467 device_unregister(current_dev);
468 }
469
470 vmbus_child_driver_unregister(&drv->driver);
471
472 return;
473 }
474
475 static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
476 {
477 struct netvsc_driver *net_drv_obj = &netvsc_drv;
478 struct hv_driver *drv = &netvsc_drv.base;
479 int ret;
480
481 net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
482 net_drv_obj->recv_cb = netvsc_recv_callback;
483 net_drv_obj->link_status_change = netvsc_linkstatus_callback;
484
485 /* Callback to client driver to complete the initialization */
486 drv_init(&net_drv_obj->base);
487
488 drv->driver.name = net_drv_obj->base.name;
489
490 /* The driver belongs to vmbus */
491 ret = vmbus_child_driver_register(&drv->driver);
492
493 return ret;
494 }
495
/* DMI strings identifying a Hyper-V guest; checked in netvsc_init() so
 * the driver refuses to load on other systems, and exported via
 * MODULE_DEVICE_TABLE for module autoloading. */
static const struct dmi_system_id __initconst
hv_netvsc_dmi_table[] __maybe_unused = {
	{
		.ident = "Hyper-V",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
			DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
		},
	},
	{ },
};
MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
509
510 static int __init netvsc_init(void)
511 {
512 pr_info("initializing....");
513
514 if (!dmi_check_system(hv_netvsc_dmi_table))
515 return -ENODEV;
516
517 return netvsc_drv_init(netvsc_initialize);
518 }
519
/* Module exit: unregister all devices and the vmbus driver. */
static void __exit netvsc_exit(void)
{
	netvsc_drv_exit();
}
524
/* PCI id 1414:5353 (Microsoft vendor id) — presumably the Hyper-V
 * virtual VGA device; listed only so MODULE_DEVICE_TABLE can trigger
 * autoload inside guests.  Not used for probing. TODO confirm intent. */
static const struct pci_device_id __initconst
hv_netvsc_pci_table[] __maybe_unused = {
	{ PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);
531
/* Module metadata and entry points */
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_init);
module_exit(netvsc_exit);