1 /*
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47
48 #include <asm/xen/page.h>
49 #include <xen/xen.h>
50 #include <xen/xenbus.h>
51 #include <xen/events.h>
52 #include <xen/page.h>
53 #include <xen/platform_pci.h>
54 #include <xen/grant_table.h>
55
56 #include <xen/interface/io/netif.h>
57 #include <xen/interface/memory.h>
58 #include <xen/interface/grant_table.h>
59
60 static const struct ethtool_ops xennet_ethtool_ops;
61
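/*
 * Per-skb receive-path state kept in skb->cb: the number of bytes that
 * should be pulled from the frags into the linear area before the skb
 * is handed up the stack (see handle_incoming_queue()).
 */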
62 struct netfront_cb {
63 int pull_to;
64 };
65
66 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
67
68 #define RX_COPY_THRESHOLD 256
69
70 #define GRANT_INVALID_REF 0
71
72 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
73 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
74 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
75
76 struct netfront_stats {
77 u64 rx_packets;
78 u64 tx_packets;
79 u64 rx_bytes;
80 u64 tx_bytes;
81 struct u64_stats_sync syncp;
82 };
83
84 struct netfront_info {
85 struct list_head list;
86 struct net_device *netdev;
87
88 struct napi_struct napi;
89
90 /* Split event channels support, tx_* == rx_* when using
91 * single event channel.
92 */
93 unsigned int tx_evtchn, rx_evtchn;
94 unsigned int tx_irq, rx_irq;
95 /* Only used when split event channels support is enabled */
96 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
97 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
98
99 struct xenbus_device *xbdev;
100
101 spinlock_t tx_lock;
102 struct xen_netif_tx_front_ring tx;
103 int tx_ring_ref;
104
105 /*
106 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
107 * are linked from tx_skb_freelist through skb_entry.link.
108 *
109	 * NB. Freelist index entries are always less than
110	 * PAGE_OFFSET, whereas pointers to skbs are always greater than
111	 * or equal to PAGE_OFFSET: we use this property to distinguish
112	 * them.
113 */
114 union skb_entry {
115 struct sk_buff *skb;
116 unsigned long link;
117 } tx_skbs[NET_TX_RING_SIZE];
118 grant_ref_t gref_tx_head;
119 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
120 unsigned tx_skb_freelist;
121
122 spinlock_t rx_lock ____cacheline_aligned_in_smp;
123 struct xen_netif_rx_front_ring rx;
124 int rx_ring_ref;
125
126 /* Receive-ring batched refills. */
127 #define RX_MIN_TARGET 8
128 #define RX_DFL_MIN_TARGET 64
129 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
130 unsigned rx_min_target, rx_max_target, rx_target;
131 struct sk_buff_head rx_batch;
132
133 struct timer_list rx_refill_timer;
134
135 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
136 grant_ref_t gref_rx_head;
137 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
138
139 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
140 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
141 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
142
143 /* Statistics */
144 struct netfront_stats __percpu *stats;
145
146 unsigned long rx_gso_checksum_fixup;
147 };
148
149 struct netfront_rx_info {
150 struct xen_netif_rx_response rx;
151 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
152 };
153
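/*
 * Helpers for the tx_skbs[] freelist described above: an entry either
 * holds an skb pointer (>= PAGE_OFFSET) or the index of the next free
 * slot (< PAGE_OFFSET).
 */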
154 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
155 {
156 list->link = id;
157 }
158
159 static int skb_entry_is_link(const union skb_entry *list)
160 {
161 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
162 return (unsigned long)list->skb < PAGE_OFFSET;
163 }
164
165 /*
166	 * Access macros for acquiring and freeing slots in tx_skbs[].
167 */
168
169 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
170 unsigned short id)
171 {
172 skb_entry_set_link(&list[id], *head);
173 *head = id;
174 }
175
176 static unsigned short get_id_from_freelist(unsigned *head,
177 union skb_entry *list)
178 {
179 unsigned int id = *head;
180 *head = list[id].link;
181 return id;
182 }
183
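/* The ring size is a power of two, so masking wraps an index into rx_skbs[]. */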
184 static int xennet_rxidx(RING_IDX idx)
185 {
186 return idx & (NET_RX_RING_SIZE - 1);
187 }
188
189 static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
190 RING_IDX ri)
191 {
192 int i = xennet_rxidx(ri);
193 struct sk_buff *skb = np->rx_skbs[i];
194 np->rx_skbs[i] = NULL;
195 return skb;
196 }
197
198 static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
199 RING_IDX ri)
200 {
201 int i = xennet_rxidx(ri);
202 grant_ref_t ref = np->grant_rx_ref[i];
203 np->grant_rx_ref[i] = GRANT_INVALID_REF;
204 return ref;
205 }
206
207 #ifdef CONFIG_SYSFS
208 static int xennet_sysfs_addif(struct net_device *netdev);
209 static void xennet_sysfs_delif(struct net_device *netdev);
210 #else /* !CONFIG_SYSFS */
211 #define xennet_sysfs_addif(dev) (0)
212 #define xennet_sysfs_delif(dev) do { } while (0)
213 #endif
214
215 static bool xennet_can_sg(struct net_device *dev)
216 {
217 return dev->features & NETIF_F_SG;
218 }
219
220
221 static void rx_refill_timeout(unsigned long data)
222 {
223 struct net_device *dev = (struct net_device *)data;
224 struct netfront_info *np = netdev_priv(dev);
225 napi_schedule(&np->napi);
226 }
227
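/*
 * True when enough tx slots remain for a maximally fragmented skb:
 * one slot per fragment plus slack for the linear header and an
 * extra-info request.
 */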
228 static int netfront_tx_slot_available(struct netfront_info *np)
229 {
230 return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
231 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
232 }
233
234 static void xennet_maybe_wake_tx(struct net_device *dev)
235 {
236 struct netfront_info *np = netdev_priv(dev);
237
238 if (unlikely(netif_queue_stopped(dev)) &&
239 netfront_tx_slot_available(np) &&
240 likely(netif_running(dev)))
241 netif_wake_queue(dev);
242 }
243
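/*
 * Top up the receive ring: allocate skbs each backed by one granted page,
 * post the requests in a batch and notify the backend if it is waiting.
 */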
244 static void xennet_alloc_rx_buffers(struct net_device *dev)
245 {
246 unsigned short id;
247 struct netfront_info *np = netdev_priv(dev);
248 struct sk_buff *skb;
249 struct page *page;
250 int i, batch_target, notify;
251 RING_IDX req_prod = np->rx.req_prod_pvt;
252 grant_ref_t ref;
253 unsigned long pfn;
254 void *vaddr;
255 struct xen_netif_rx_request *req;
256
257 if (unlikely(!netif_carrier_ok(dev)))
258 return;
259
260 /*
261 * Allocate skbuffs greedily, even though we batch updates to the
262 * receive ring. This creates a less bursty demand on the memory
263 * allocator, so should reduce the chance of failed allocation requests
264	 * both for ourselves and for other kernel subsystems.
265 */
266 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
267 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
268 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
269 GFP_ATOMIC | __GFP_NOWARN);
270 if (unlikely(!skb))
271 goto no_skb;
272
273	/* Align the IP header to a 16-byte boundary */
274 skb_reserve(skb, NET_IP_ALIGN);
275
276 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
277 if (!page) {
278 kfree_skb(skb);
279 no_skb:
280 /* Any skbuffs queued for refill? Force them out. */
281 if (i != 0)
282 goto refill;
283 /* Could not allocate any skbuffs. Try again later. */
284 mod_timer(&np->rx_refill_timer,
285 jiffies + (HZ/10));
286 break;
287 }
288
289 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
290 __skb_queue_tail(&np->rx_batch, skb);
291 }
292
293 /* Is the batch large enough to be worthwhile? */
294 if (i < (np->rx_target/2)) {
295 if (req_prod > np->rx.sring->req_prod)
296 goto push;
297 return;
298 }
299
300 /* Adjust our fill target if we risked running out of buffers. */
301 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
302 ((np->rx_target *= 2) > np->rx_max_target))
303 np->rx_target = np->rx_max_target;
304
305 refill:
306 for (i = 0; ; i++) {
307 skb = __skb_dequeue(&np->rx_batch);
308 if (skb == NULL)
309 break;
310
311 skb->dev = dev;
312
313 id = xennet_rxidx(req_prod + i);
314
315 BUG_ON(np->rx_skbs[id]);
316 np->rx_skbs[id] = skb;
317
318 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
319 BUG_ON((signed short)ref < 0);
320 np->grant_rx_ref[id] = ref;
321
322 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
323 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
324
325 req = RING_GET_REQUEST(&np->rx, req_prod + i);
326 gnttab_grant_foreign_access_ref(ref,
327 np->xbdev->otherend_id,
328 pfn_to_mfn(pfn),
329 0);
330
331 req->id = id;
332 req->gref = ref;
333 }
334
335	wmb();		/* barrier so backend sees requests */
336
337 /* Above is a suitable barrier to ensure backend will see requests. */
338 np->rx.req_prod_pvt = req_prod + i;
339 push:
340 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
341 if (notify)
342 notify_remote_via_irq(np->rx_irq);
343 }
344
345 static int xennet_open(struct net_device *dev)
346 {
347 struct netfront_info *np = netdev_priv(dev);
348
349 napi_enable(&np->napi);
350
351 spin_lock_bh(&np->rx_lock);
352 if (netif_carrier_ok(dev)) {
353 xennet_alloc_rx_buffers(dev);
354 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
355 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
356 napi_schedule(&np->napi);
357 }
358 spin_unlock_bh(&np->rx_lock);
359
360 netif_start_queue(dev);
361
362 return 0;
363 }
364
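/*
 * Reclaim tx ring slots that the backend has responded to: revoke the
 * grant, return the grant reference and the tx_skbs[] slot to their
 * free lists and free the skb.  Called with tx_lock held.
 */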
365 static void xennet_tx_buf_gc(struct net_device *dev)
366 {
367 RING_IDX cons, prod;
368 unsigned short id;
369 struct netfront_info *np = netdev_priv(dev);
370 struct sk_buff *skb;
371
372 BUG_ON(!netif_carrier_ok(dev));
373
374 do {
375 prod = np->tx.sring->rsp_prod;
376 rmb(); /* Ensure we see responses up to 'rp'. */
377
378 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
379 struct xen_netif_tx_response *txrsp;
380
381 txrsp = RING_GET_RESPONSE(&np->tx, cons);
382 if (txrsp->status == XEN_NETIF_RSP_NULL)
383 continue;
384
385 id = txrsp->id;
386 skb = np->tx_skbs[id].skb;
387 if (unlikely(gnttab_query_foreign_access(
388 np->grant_tx_ref[id]) != 0)) {
389 pr_alert("%s: warning -- grant still in use by backend domain\n",
390 __func__);
391 BUG();
392 }
393 gnttab_end_foreign_access_ref(
394 np->grant_tx_ref[id], GNTMAP_readonly);
395 gnttab_release_grant_reference(
396 &np->gref_tx_head, np->grant_tx_ref[id]);
397 np->grant_tx_ref[id] = GRANT_INVALID_REF;
398 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
399 dev_kfree_skb_irq(skb);
400 }
401
402 np->tx.rsp_cons = prod;
403
404 /*
405 * Set a new event, then check for race with update of tx_cons.
406 * Note that it is essential to schedule a callback, no matter
407 * how few buffers are pending. Even if there is space in the
408 * transmit ring, higher layers may be blocked because too much
409 * data is outstanding: in such cases notification from Xen is
410 * likely to be the only kick that we'll get.
411 */
412 np->tx.sring->rsp_event =
413 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
414 mb(); /* update shared area */
415 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
416
417 xennet_maybe_wake_tx(dev);
418 }
419
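/*
 * Post additional tx requests for the rest of the packet: the part of
 * the linear area that spills past the first page plus every fragment
 * page, granting the backend read access to each page-sized chunk.
 */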
420 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
421 struct xen_netif_tx_request *tx)
422 {
423 struct netfront_info *np = netdev_priv(dev);
424 char *data = skb->data;
425 unsigned long mfn;
426 RING_IDX prod = np->tx.req_prod_pvt;
427 int frags = skb_shinfo(skb)->nr_frags;
428 unsigned int offset = offset_in_page(data);
429 unsigned int len = skb_headlen(skb);
430 unsigned int id;
431 grant_ref_t ref;
432 int i;
433
434 /* While the header overlaps a page boundary (including being
435	   larger than a page), split it into page-sized chunks. */
436 while (len > PAGE_SIZE - offset) {
437 tx->size = PAGE_SIZE - offset;
438 tx->flags |= XEN_NETTXF_more_data;
439 len -= tx->size;
440 data += tx->size;
441 offset = 0;
442
443 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
444 np->tx_skbs[id].skb = skb_get(skb);
445 tx = RING_GET_REQUEST(&np->tx, prod++);
446 tx->id = id;
447 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
448 BUG_ON((signed short)ref < 0);
449
450 mfn = virt_to_mfn(data);
451 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
452 mfn, GNTMAP_readonly);
453
454 tx->gref = np->grant_tx_ref[id] = ref;
455 tx->offset = offset;
456 tx->size = len;
457 tx->flags = 0;
458 }
459
460 /* Grant backend access to each skb fragment page. */
461 for (i = 0; i < frags; i++) {
462 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
463 struct page *page = skb_frag_page(frag);
464
465 len = skb_frag_size(frag);
466 offset = frag->page_offset;
467
468 /* Data must not cross a page boundary. */
469 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
470
471	/* Skip any whole pages at the start of the frag that hold no data */
472 page += offset >> PAGE_SHIFT;
473 offset &= ~PAGE_MASK;
474
475 while (len > 0) {
476 unsigned long bytes;
477
478 BUG_ON(offset >= PAGE_SIZE);
479
480 bytes = PAGE_SIZE - offset;
481 if (bytes > len)
482 bytes = len;
483
484 tx->flags |= XEN_NETTXF_more_data;
485
486 id = get_id_from_freelist(&np->tx_skb_freelist,
487 np->tx_skbs);
488 np->tx_skbs[id].skb = skb_get(skb);
489 tx = RING_GET_REQUEST(&np->tx, prod++);
490 tx->id = id;
491 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
492 BUG_ON((signed short)ref < 0);
493
494 mfn = pfn_to_mfn(page_to_pfn(page));
495 gnttab_grant_foreign_access_ref(ref,
496 np->xbdev->otherend_id,
497 mfn, GNTMAP_readonly);
498
499 tx->gref = np->grant_tx_ref[id] = ref;
500 tx->offset = offset;
501 tx->size = bytes;
502 tx->flags = 0;
503
504 offset += bytes;
505 len -= bytes;
506
507	/* Next page of the compound page */
508 if (offset == PAGE_SIZE && len) {
509 BUG_ON(!PageCompound(page));
510 page++;
511 offset = 0;
512 }
513 }
514 }
515
516 np->tx.req_prod_pvt = prod;
517 }
518
519 /*
520 * Count how many ring slots are required to send the frags of this
521 * skb. Each frag might be a compound page.
522 */
523 static int xennet_count_skb_frag_slots(struct sk_buff *skb)
524 {
525 int i, frags = skb_shinfo(skb)->nr_frags;
526 int pages = 0;
527
528 for (i = 0; i < frags; i++) {
529 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
530 unsigned long size = skb_frag_size(frag);
531 unsigned long offset = frag->page_offset;
532
533	/* Skip any whole pages at the start of the frag that hold no data */
534 offset &= ~PAGE_MASK;
535
536 pages += PFN_UP(offset + size);
537 }
538
539 return pages;
540 }
541
542 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 {
544 unsigned short id;
545 struct netfront_info *np = netdev_priv(dev);
546 struct netfront_stats *stats = this_cpu_ptr(np->stats);
547 struct xen_netif_tx_request *tx;
548 char *data = skb->data;
549 RING_IDX i;
550 grant_ref_t ref;
551 unsigned long mfn;
552 int notify;
553 int slots;
554 unsigned int offset = offset_in_page(data);
555 unsigned int len = skb_headlen(skb);
556 unsigned long flags;
557
558 /* If skb->len is too big for wire format, drop skb and alert
559 * user about misconfiguration.
560 */
561 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
562 net_alert_ratelimited(
563 "xennet: skb->len = %u, too big for wire format\n",
564 skb->len);
565 goto drop;
566 }
567
568 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
569 xennet_count_skb_frag_slots(skb);
570 if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
571 net_alert_ratelimited(
572 "xennet: skb rides the rocket: %d slots\n", slots);
573 goto drop;
574 }
575
576 spin_lock_irqsave(&np->tx_lock, flags);
577
578 if (unlikely(!netif_carrier_ok(dev) ||
579 (slots > 1 && !xennet_can_sg(dev)) ||
580 netif_needs_gso(skb, netif_skb_features(skb)))) {
581 spin_unlock_irqrestore(&np->tx_lock, flags);
582 goto drop;
583 }
584
585 i = np->tx.req_prod_pvt;
586
587 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
588 np->tx_skbs[id].skb = skb;
589
590 tx = RING_GET_REQUEST(&np->tx, i);
591
592 tx->id = id;
593 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
594 BUG_ON((signed short)ref < 0);
595 mfn = virt_to_mfn(data);
596 gnttab_grant_foreign_access_ref(
597 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
598 tx->gref = np->grant_tx_ref[id] = ref;
599 tx->offset = offset;
600 tx->size = len;
601
602 tx->flags = 0;
603 if (skb->ip_summed == CHECKSUM_PARTIAL)
604 /* local packet? */
605 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
606 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
607 /* remote but checksummed. */
608 tx->flags |= XEN_NETTXF_data_validated;
609
610 if (skb_shinfo(skb)->gso_size) {
611 struct xen_netif_extra_info *gso;
612
613 gso = (struct xen_netif_extra_info *)
614 RING_GET_REQUEST(&np->tx, ++i);
615
616 tx->flags |= XEN_NETTXF_extra_info;
617
618 gso->u.gso.size = skb_shinfo(skb)->gso_size;
619 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
620 gso->u.gso.pad = 0;
621 gso->u.gso.features = 0;
622
623 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
624 gso->flags = 0;
625 }
626
627 np->tx.req_prod_pvt = i + 1;
628
629 xennet_make_frags(skb, dev, tx);
630 tx->size = skb->len;
631
632 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
633 if (notify)
634 notify_remote_via_irq(np->tx_irq);
635
636 u64_stats_update_begin(&stats->syncp);
637 stats->tx_bytes += skb->len;
638 stats->tx_packets++;
639 u64_stats_update_end(&stats->syncp);
640
641 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
642 xennet_tx_buf_gc(dev);
643
644 if (!netfront_tx_slot_available(np))
645 netif_stop_queue(dev);
646
647 spin_unlock_irqrestore(&np->tx_lock, flags);
648
649 return NETDEV_TX_OK;
650
651 drop:
652 dev->stats.tx_dropped++;
653 dev_kfree_skb(skb);
654 return NETDEV_TX_OK;
655 }
656
657 static int xennet_close(struct net_device *dev)
658 {
659 struct netfront_info *np = netdev_priv(dev);
660 netif_stop_queue(np->netdev);
661 napi_disable(&np->napi);
662 return 0;
663 }
664
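/*
 * Re-post an skb and its grant at the next free rx request slot, used
 * when a response slot (an extra-info slot, or one that errored) did
 * not consume its buffer.
 */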
665 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
666 grant_ref_t ref)
667 {
668 int new = xennet_rxidx(np->rx.req_prod_pvt);
669
670 BUG_ON(np->rx_skbs[new]);
671 np->rx_skbs[new] = skb;
672 np->grant_rx_ref[new] = ref;
673 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
674 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
675 np->rx.req_prod_pvt++;
676 }
677
678 static int xennet_get_extras(struct netfront_info *np,
679 struct xen_netif_extra_info *extras,
680 RING_IDX rp)
681
682 {
683 struct xen_netif_extra_info *extra;
684 struct device *dev = &np->netdev->dev;
685 RING_IDX cons = np->rx.rsp_cons;
686 int err = 0;
687
688 do {
689 struct sk_buff *skb;
690 grant_ref_t ref;
691
692 if (unlikely(cons + 1 == rp)) {
693 if (net_ratelimit())
694 dev_warn(dev, "Missing extra info\n");
695 err = -EBADR;
696 break;
697 }
698
699 extra = (struct xen_netif_extra_info *)
700 RING_GET_RESPONSE(&np->rx, ++cons);
701
702 if (unlikely(!extra->type ||
703 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
704 if (net_ratelimit())
705 dev_warn(dev, "Invalid extra type: %d\n",
706 extra->type);
707 err = -EINVAL;
708 } else {
709 memcpy(&extras[extra->type - 1], extra,
710 sizeof(*extra));
711 }
712
713 skb = xennet_get_rx_skb(np, cons);
714 ref = xennet_get_rx_ref(np, cons);
715 xennet_move_rx_slot(np, skb, ref);
716 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
717
718 np->rx.rsp_cons = cons;
719 return err;
720 }
721
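/*
 * Consume the chain of responses that make up one received packet,
 * ending the foreign access on each buffer and collecting the backing
 * skbs on 'list' for xennet_fill_frags().
 */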
722 static int xennet_get_responses(struct netfront_info *np,
723 struct netfront_rx_info *rinfo, RING_IDX rp,
724 struct sk_buff_head *list)
725 {
726 struct xen_netif_rx_response *rx = &rinfo->rx;
727 struct xen_netif_extra_info *extras = rinfo->extras;
728 struct device *dev = &np->netdev->dev;
729 RING_IDX cons = np->rx.rsp_cons;
730 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
731 grant_ref_t ref = xennet_get_rx_ref(np, cons);
732 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
733 int slots = 1;
734 int err = 0;
735 unsigned long ret;
736
737 if (rx->flags & XEN_NETRXF_extra_info) {
738 err = xennet_get_extras(np, extras, rp);
739 cons = np->rx.rsp_cons;
740 }
741
742 for (;;) {
743 if (unlikely(rx->status < 0 ||
744 rx->offset + rx->status > PAGE_SIZE)) {
745 if (net_ratelimit())
746 dev_warn(dev, "rx->offset: %x, size: %u\n",
747 rx->offset, rx->status);
748 xennet_move_rx_slot(np, skb, ref);
749 err = -EINVAL;
750 goto next;
751 }
752
753 /*
754 * This definitely indicates a bug, either in this driver or in
755 * the backend driver. In future this should flag the bad
756 * situation to the system controller to reboot the backend.
757 */
758 if (ref == GRANT_INVALID_REF) {
759 if (net_ratelimit())
760 dev_warn(dev, "Bad rx response id %d.\n",
761 rx->id);
762 err = -EINVAL;
763 goto next;
764 }
765
766 ret = gnttab_end_foreign_access_ref(ref, 0);
767 BUG_ON(!ret);
768
769 gnttab_release_grant_reference(&np->gref_rx_head, ref);
770
771 __skb_queue_tail(list, skb);
772
773 next:
774 if (!(rx->flags & XEN_NETRXF_more_data))
775 break;
776
777 if (cons + slots == rp) {
778 if (net_ratelimit())
779 dev_warn(dev, "Need more slots\n");
780 err = -ENOENT;
781 break;
782 }
783
784 rx = RING_GET_RESPONSE(&np->rx, cons + slots);
785 skb = xennet_get_rx_skb(np, cons + slots);
786 ref = xennet_get_rx_ref(np, cons + slots);
787 slots++;
788 }
789
790 if (unlikely(slots > max)) {
791 if (net_ratelimit())
792 dev_warn(dev, "Too many slots\n");
793 err = -E2BIG;
794 }
795
796 if (unlikely(err))
797 np->rx.rsp_cons = cons + slots;
798
799 return err;
800 }
801
802 static int xennet_set_skb_gso(struct sk_buff *skb,
803 struct xen_netif_extra_info *gso)
804 {
805 if (!gso->u.gso.size) {
806 if (net_ratelimit())
807 pr_warn("GSO size must not be zero\n");
808 return -EINVAL;
809 }
810
811	/* Currently only TCPv4 segmentation offload is supported. */
812 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
813 if (net_ratelimit())
814 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
815 return -EINVAL;
816 }
817
818 skb_shinfo(skb)->gso_size = gso->u.gso.size;
819 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
820
821 /* Header must be checked, and gso_segs computed. */
822 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
823 skb_shinfo(skb)->gso_segs = 0;
824
825 return 0;
826 }
827
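/*
 * Attach the pages collected by xennet_get_responses() to the head skb
 * as frags; if the frag slots run out, data is pulled into the linear
 * area first.  Returns the updated response-consumer index.
 */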
828 static RING_IDX xennet_fill_frags(struct netfront_info *np,
829 struct sk_buff *skb,
830 struct sk_buff_head *list)
831 {
832 struct skb_shared_info *shinfo = skb_shinfo(skb);
833 RING_IDX cons = np->rx.rsp_cons;
834 struct sk_buff *nskb;
835
836 while ((nskb = __skb_dequeue(list))) {
837 struct xen_netif_rx_response *rx =
838 RING_GET_RESPONSE(&np->rx, ++cons);
839 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
840
841 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
842 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
843
844 BUG_ON(pull_to <= skb_headlen(skb));
845 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
846 }
847 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
848
849 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
850 rx->offset, rx->status, PAGE_SIZE);
851
852 skb_shinfo(nskb)->nr_frags = 0;
853 kfree_skb(nskb);
854 }
855
856 return cons;
857 }
858
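/*
 * Fill in the partial-checksum offsets for TCP/UDP packets and, for GSO
 * frames from buggy peers that were not marked CHECKSUM_PARTIAL, force
 * CHECKSUM_PARTIAL and recompute the pseudo-header checksum.
 */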
859 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
860 {
861 struct iphdr *iph;
862 int err = -EPROTO;
863 int recalculate_partial_csum = 0;
864
865 /*
866 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
867 * peers can fail to set NETRXF_csum_blank when sending a GSO
868 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
869 * recalculate the partial checksum.
870 */
871 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
872 struct netfront_info *np = netdev_priv(dev);
873 np->rx_gso_checksum_fixup++;
874 skb->ip_summed = CHECKSUM_PARTIAL;
875 recalculate_partial_csum = 1;
876 }
877
878 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
879 if (skb->ip_summed != CHECKSUM_PARTIAL)
880 return 0;
881
882 if (skb->protocol != htons(ETH_P_IP))
883 goto out;
884
885 iph = (void *)skb->data;
886
887 switch (iph->protocol) {
888 case IPPROTO_TCP:
889 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
890 offsetof(struct tcphdr, check)))
891 goto out;
892
893 if (recalculate_partial_csum) {
894 struct tcphdr *tcph = tcp_hdr(skb);
895 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
896 skb->len - iph->ihl*4,
897 IPPROTO_TCP, 0);
898 }
899 break;
900 case IPPROTO_UDP:
901 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
902 offsetof(struct udphdr, check)))
903 goto out;
904
905 if (recalculate_partial_csum) {
906 struct udphdr *udph = udp_hdr(skb);
907 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
908 skb->len - iph->ihl*4,
909 IPPROTO_UDP, 0);
910 }
911 break;
912 default:
913 if (net_ratelimit())
914 pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
915 iph->protocol);
916 goto out;
917 }
918
919 err = 0;
920
921 out:
922 return err;
923 }
924
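/*
 * For each skb taken off the rx ring: pull the header into the linear
 * area, set the protocol, fix up checksum state and hand it to GRO.
 * Returns the number of packets dropped.
 */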
925 static int handle_incoming_queue(struct net_device *dev,
926 struct sk_buff_head *rxq)
927 {
928 struct netfront_info *np = netdev_priv(dev);
929 struct netfront_stats *stats = this_cpu_ptr(np->stats);
930 int packets_dropped = 0;
931 struct sk_buff *skb;
932
933 while ((skb = __skb_dequeue(rxq)) != NULL) {
934 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
935
936 if (pull_to > skb_headlen(skb))
937 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
938
939 /* Ethernet work: Delayed to here as it peeks the header. */
940 skb->protocol = eth_type_trans(skb, dev);
941
942 if (checksum_setup(dev, skb)) {
943 kfree_skb(skb);
944 packets_dropped++;
945 dev->stats.rx_errors++;
946 continue;
947 }
948
949 u64_stats_update_begin(&stats->syncp);
950 stats->rx_packets++;
951 stats->rx_bytes += skb->len;
952 u64_stats_update_end(&stats->syncp);
953
954 /* Pass it up. */
955 napi_gro_receive(&np->napi, skb);
956 }
957
958 return packets_dropped;
959 }
960
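/*
 * NAPI poll handler: drain up to 'budget' packets from the rx ring,
 * pass them to the stack, refill the ring, and complete NAPI once no
 * further responses are pending.
 */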
961 static int xennet_poll(struct napi_struct *napi, int budget)
962 {
963 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
964 struct net_device *dev = np->netdev;
965 struct sk_buff *skb;
966 struct netfront_rx_info rinfo;
967 struct xen_netif_rx_response *rx = &rinfo.rx;
968 struct xen_netif_extra_info *extras = rinfo.extras;
969 RING_IDX i, rp;
970 int work_done;
971 struct sk_buff_head rxq;
972 struct sk_buff_head errq;
973 struct sk_buff_head tmpq;
974 unsigned long flags;
975 int err;
976
977 spin_lock(&np->rx_lock);
978
979 skb_queue_head_init(&rxq);
980 skb_queue_head_init(&errq);
981 skb_queue_head_init(&tmpq);
982
983 rp = np->rx.sring->rsp_prod;
984 rmb(); /* Ensure we see queued responses up to 'rp'. */
985
986 i = np->rx.rsp_cons;
987 work_done = 0;
988 while ((i != rp) && (work_done < budget)) {
989 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
990 memset(extras, 0, sizeof(rinfo.extras));
991
992 err = xennet_get_responses(np, &rinfo, rp, &tmpq);
993
994 if (unlikely(err)) {
995 err:
996 while ((skb = __skb_dequeue(&tmpq)))
997 __skb_queue_tail(&errq, skb);
998 dev->stats.rx_errors++;
999 i = np->rx.rsp_cons;
1000 continue;
1001 }
1002
1003 skb = __skb_dequeue(&tmpq);
1004
1005 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1006 struct xen_netif_extra_info *gso;
1007 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1008
1009 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1010 __skb_queue_head(&tmpq, skb);
1011 np->rx.rsp_cons += skb_queue_len(&tmpq);
1012 goto err;
1013 }
1014 }
1015
1016 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1017 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1018 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1019
1020 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1021 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1022 skb->data_len = rx->status;
1023 skb->len += rx->status;
1024
1025 i = xennet_fill_frags(np, skb, &tmpq);
1026
1027 if (rx->flags & XEN_NETRXF_csum_blank)
1028 skb->ip_summed = CHECKSUM_PARTIAL;
1029 else if (rx->flags & XEN_NETRXF_data_validated)
1030 skb->ip_summed = CHECKSUM_UNNECESSARY;
1031
1032 __skb_queue_tail(&rxq, skb);
1033
1034 np->rx.rsp_cons = ++i;
1035 work_done++;
1036 }
1037
1038 __skb_queue_purge(&errq);
1039
1040 work_done -= handle_incoming_queue(dev, &rxq);
1041
1042 /* If we get a callback with very few responses, reduce fill target. */
1043 /* NB. Note exponential increase, linear decrease. */
1044 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1045 ((3*np->rx_target) / 4)) &&
1046 (--np->rx_target < np->rx_min_target))
1047 np->rx_target = np->rx_min_target;
1048
1049 xennet_alloc_rx_buffers(dev);
1050
1051 if (work_done < budget) {
1052 int more_to_do = 0;
1053
1054 napi_gro_flush(napi, false);
1055
1056 local_irq_save(flags);
1057
1058 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1059 if (!more_to_do)
1060 __napi_complete(napi);
1061
1062 local_irq_restore(flags);
1063 }
1064
1065 spin_unlock(&np->rx_lock);
1066
1067 return work_done;
1068 }
1069
1070 static int xennet_change_mtu(struct net_device *dev, int mtu)
1071 {
1072 int max = xennet_can_sg(dev) ?
1073 XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1074
1075 if (mtu > max)
1076 return -EINVAL;
1077 dev->mtu = mtu;
1078 return 0;
1079 }
1080
1081 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1082 struct rtnl_link_stats64 *tot)
1083 {
1084 struct netfront_info *np = netdev_priv(dev);
1085 int cpu;
1086
1087 for_each_possible_cpu(cpu) {
1088 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1089 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1090 unsigned int start;
1091
1092 do {
1093 start = u64_stats_fetch_begin_bh(&stats->syncp);
1094
1095 rx_packets = stats->rx_packets;
1096 tx_packets = stats->tx_packets;
1097 rx_bytes = stats->rx_bytes;
1098 tx_bytes = stats->tx_bytes;
1099 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1100
1101 tot->rx_packets += rx_packets;
1102 tot->tx_packets += tx_packets;
1103 tot->rx_bytes += rx_bytes;
1104 tot->tx_bytes += tx_bytes;
1105 }
1106
1107 tot->rx_errors = dev->stats.rx_errors;
1108 tot->tx_dropped = dev->stats.tx_dropped;
1109
1110 return tot;
1111 }
1112
1113 static void xennet_release_tx_bufs(struct netfront_info *np)
1114 {
1115 struct sk_buff *skb;
1116 int i;
1117
1118 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1119 /* Skip over entries which are actually freelist references */
1120 if (skb_entry_is_link(&np->tx_skbs[i]))
1121 continue;
1122
1123 skb = np->tx_skbs[i].skb;
1124 gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1125 GNTMAP_readonly);
1126 gnttab_release_grant_reference(&np->gref_tx_head,
1127 np->grant_tx_ref[i]);
1128 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1129 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1130 dev_kfree_skb_irq(skb);
1131 }
1132 }
1133
1134 static void xennet_release_rx_bufs(struct netfront_info *np)
1135 {
1136 struct mmu_update *mmu = np->rx_mmu;
1137 struct multicall_entry *mcl = np->rx_mcl;
1138 struct sk_buff_head free_list;
1139 struct sk_buff *skb;
1140 unsigned long mfn;
1141 int xfer = 0, noxfer = 0, unused = 0;
1142 int id, ref;
1143
1144 dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1145 __func__);
1146 return;
1147
1148 skb_queue_head_init(&free_list);
1149
1150 spin_lock_bh(&np->rx_lock);
1151
1152 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1153 ref = np->grant_rx_ref[id];
1154 if (ref == GRANT_INVALID_REF) {
1155 unused++;
1156 continue;
1157 }
1158
1159 skb = np->rx_skbs[id];
1160 mfn = gnttab_end_foreign_transfer_ref(ref);
1161 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1162 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1163
1164 if (0 == mfn) {
1165 skb_shinfo(skb)->nr_frags = 0;
1166 dev_kfree_skb(skb);
1167 noxfer++;
1168 continue;
1169 }
1170
1171 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1172 /* Remap the page. */
1173 const struct page *page =
1174 skb_frag_page(&skb_shinfo(skb)->frags[0]);
1175 unsigned long pfn = page_to_pfn(page);
1176 void *vaddr = page_address(page);
1177
1178 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1179 mfn_pte(mfn, PAGE_KERNEL),
1180 0);
1181 mcl++;
1182 mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1183 | MMU_MACHPHYS_UPDATE;
1184 mmu->val = pfn;
1185 mmu++;
1186
1187 set_phys_to_machine(pfn, mfn);
1188 }
1189 __skb_queue_tail(&free_list, skb);
1190 xfer++;
1191 }
1192
1193 dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1194 __func__, xfer, noxfer, unused);
1195
1196 if (xfer) {
1197 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1198 /* Do all the remapping work and M2P updates. */
1199 MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1200 NULL, DOMID_SELF);
1201 mcl++;
1202 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1203 }
1204 }
1205
1206 __skb_queue_purge(&free_list);
1207
1208 spin_unlock_bh(&np->rx_lock);
1209 }
1210
1211 static void xennet_uninit(struct net_device *dev)
1212 {
1213 struct netfront_info *np = netdev_priv(dev);
1214 xennet_release_tx_bufs(np);
1215 xennet_release_rx_bufs(np);
1216 gnttab_free_grant_references(np->gref_tx_head);
1217 gnttab_free_grant_references(np->gref_rx_head);
1218 }
1219
1220 static netdev_features_t xennet_fix_features(struct net_device *dev,
1221 netdev_features_t features)
1222 {
1223 struct netfront_info *np = netdev_priv(dev);
1224 int val;
1225
1226 if (features & NETIF_F_SG) {
1227 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1228 "%d", &val) < 0)
1229 val = 0;
1230
1231 if (!val)
1232 features &= ~NETIF_F_SG;
1233 }
1234
1235 if (features & NETIF_F_TSO) {
1236 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1237 "feature-gso-tcpv4", "%d", &val) < 0)
1238 val = 0;
1239
1240 if (!val)
1241 features &= ~NETIF_F_TSO;
1242 }
1243
1244 return features;
1245 }
1246
1247 static int xennet_set_features(struct net_device *dev,
1248 netdev_features_t features)
1249 {
1250 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1251 netdev_info(dev, "Reducing MTU because no SG offload");
1252 dev->mtu = ETH_DATA_LEN;
1253 }
1254
1255 return 0;
1256 }
1257
1258 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1259 {
1260 struct netfront_info *np = dev_id;
1261 struct net_device *dev = np->netdev;
1262 unsigned long flags;
1263
1264 spin_lock_irqsave(&np->tx_lock, flags);
1265 xennet_tx_buf_gc(dev);
1266 spin_unlock_irqrestore(&np->tx_lock, flags);
1267
1268 return IRQ_HANDLED;
1269 }
1270
1271 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1272 {
1273 struct netfront_info *np = dev_id;
1274 struct net_device *dev = np->netdev;
1275
1276 if (likely(netif_carrier_ok(dev) &&
1277 RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1278 napi_schedule(&np->napi);
1279
1280 return IRQ_HANDLED;
1281 }
1282
1283 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1284 {
1285 xennet_tx_interrupt(irq, dev_id);
1286 xennet_rx_interrupt(irq, dev_id);
1287 return IRQ_HANDLED;
1288 }
1289
1290 #ifdef CONFIG_NET_POLL_CONTROLLER
1291 static void xennet_poll_controller(struct net_device *dev)
1292 {
1293 xennet_interrupt(0, dev);
1294 }
1295 #endif
1296
1297 static const struct net_device_ops xennet_netdev_ops = {
1298 .ndo_open = xennet_open,
1299 .ndo_uninit = xennet_uninit,
1300 .ndo_stop = xennet_close,
1301 .ndo_start_xmit = xennet_start_xmit,
1302 .ndo_change_mtu = xennet_change_mtu,
1303 .ndo_get_stats64 = xennet_get_stats64,
1304 .ndo_set_mac_address = eth_mac_addr,
1305 .ndo_validate_addr = eth_validate_addr,
1306 .ndo_fix_features = xennet_fix_features,
1307 .ndo_set_features = xennet_set_features,
1308 #ifdef CONFIG_NET_POLL_CONTROLLER
1309 .ndo_poll_controller = xennet_poll_controller,
1310 #endif
1311 };
1312
1313 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1314 {
1315 int i, err;
1316 struct net_device *netdev;
1317 struct netfront_info *np;
1318
1319 netdev = alloc_etherdev(sizeof(struct netfront_info));
1320 if (!netdev)
1321 return ERR_PTR(-ENOMEM);
1322
1323 np = netdev_priv(netdev);
1324 np->xbdev = dev;
1325
1326 spin_lock_init(&np->tx_lock);
1327 spin_lock_init(&np->rx_lock);
1328
1329 skb_queue_head_init(&np->rx_batch);
1330 np->rx_target = RX_DFL_MIN_TARGET;
1331 np->rx_min_target = RX_DFL_MIN_TARGET;
1332 np->rx_max_target = RX_MAX_TARGET;
1333
1334 init_timer(&np->rx_refill_timer);
1335 np->rx_refill_timer.data = (unsigned long)netdev;
1336 np->rx_refill_timer.function = rx_refill_timeout;
1337
1338 err = -ENOMEM;
1339 np->stats = alloc_percpu(struct netfront_stats);
1340 if (np->stats == NULL)
1341 goto exit;
1342
1343 for_each_possible_cpu(i) {
1344 struct netfront_stats *xen_nf_stats;
1345 xen_nf_stats = per_cpu_ptr(np->stats, i);
1346 u64_stats_init(&xen_nf_stats->syncp);
1347 }
1348
1349 /* Initialise tx_skbs as a free chain containing every entry. */
1350 np->tx_skb_freelist = 0;
1351 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1352 skb_entry_set_link(&np->tx_skbs[i], i+1);
1353 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1354 }
1355
1356 /* Clear out rx_skbs */
1357 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1358 np->rx_skbs[i] = NULL;
1359 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1360 }
1361
1362 /* A grant for every tx ring slot */
1363 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1364 &np->gref_tx_head) < 0) {
1365 pr_alert("can't alloc tx grant refs\n");
1366 err = -ENOMEM;
1367 goto exit_free_stats;
1368 }
1369 /* A grant for every rx ring slot */
1370 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1371 &np->gref_rx_head) < 0) {
1372 pr_alert("can't alloc rx grant refs\n");
1373 err = -ENOMEM;
1374 goto exit_free_tx;
1375 }
1376
1377 netdev->netdev_ops = &xennet_netdev_ops;
1378
1379 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1380 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1381 NETIF_F_GSO_ROBUST;
1382 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1383
1384 /*
1385 * Assume that all hw features are available for now. This set
1386 * will be adjusted by the call to netdev_update_features() in
1387 * xennet_connect() which is the earliest point where we can
1388 * negotiate with the backend regarding supported features.
1389 */
1390 netdev->features |= netdev->hw_features;
1391
1392 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1393 SET_NETDEV_DEV(netdev, &dev->dev);
1394
1395 netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1396
1397 np->netdev = netdev;
1398
1399 netif_carrier_off(netdev);
1400
1401 return netdev;
1402
1403 exit_free_tx:
1404 gnttab_free_grant_references(np->gref_tx_head);
1405 exit_free_stats:
1406 free_percpu(np->stats);
1407 exit:
1408 free_netdev(netdev);
1409 return ERR_PTR(err);
1410 }
1411
1412 /**
1413 * Entry point to this code when a new device is created. Allocate the basic
1414 * structures and the ring buffers for communication with the backend, and
1415 * inform the backend of the appropriate details for those.
1416 */
1417 static int netfront_probe(struct xenbus_device *dev,
1418 const struct xenbus_device_id *id)
1419 {
1420 int err;
1421 struct net_device *netdev;
1422 struct netfront_info *info;
1423
1424 netdev = xennet_create_dev(dev);
1425 if (IS_ERR(netdev)) {
1426 err = PTR_ERR(netdev);
1427 xenbus_dev_fatal(dev, err, "creating netdev");
1428 return err;
1429 }
1430
1431 info = netdev_priv(netdev);
1432 dev_set_drvdata(&dev->dev, info);
1433
1434 err = register_netdev(info->netdev);
1435 if (err) {
1436 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1437 goto fail;
1438 }
1439
1440 err = xennet_sysfs_addif(info->netdev);
1441 if (err) {
1442 unregister_netdev(info->netdev);
1443 pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1444 goto fail;
1445 }
1446
1447 return 0;
1448
1449 fail:
1450 free_netdev(netdev);
1451 dev_set_drvdata(&dev->dev, NULL);
1452 return err;
1453 }
1454
1455 static void xennet_end_access(int ref, void *page)
1456 {
1457 /* This frees the page as a side-effect */
1458 if (ref != GRANT_INVALID_REF)
1459 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1460 }
1461
1462 static void xennet_disconnect_backend(struct netfront_info *info)
1463 {
1464 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1465 spin_lock_bh(&info->rx_lock);
1466 spin_lock_irq(&info->tx_lock);
1467 netif_carrier_off(info->netdev);
1468 spin_unlock_irq(&info->tx_lock);
1469 spin_unlock_bh(&info->rx_lock);
1470
1471 if (info->tx_irq && (info->tx_irq == info->rx_irq))
1472 unbind_from_irqhandler(info->tx_irq, info);
1473 if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1474 unbind_from_irqhandler(info->tx_irq, info);
1475 unbind_from_irqhandler(info->rx_irq, info);
1476 }
1477 info->tx_evtchn = info->rx_evtchn = 0;
1478 info->tx_irq = info->rx_irq = 0;
1479
1480 /* End access and free the pages */
1481 xennet_end_access(info->tx_ring_ref, info->tx.sring);
1482 xennet_end_access(info->rx_ring_ref, info->rx.sring);
1483
1484 info->tx_ring_ref = GRANT_INVALID_REF;
1485 info->rx_ring_ref = GRANT_INVALID_REF;
1486 info->tx.sring = NULL;
1487 info->rx.sring = NULL;
1488 }
1489
1490 /**
1491 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1492 * driver restart. We tear down our netif structure and recreate it, but
1493 * leave the device-layer structures intact so that this is transparent to the
1494 * rest of the kernel.
1495 */
1496 static int netfront_resume(struct xenbus_device *dev)
1497 {
1498 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1499
1500 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1501
1502 xennet_disconnect_backend(info);
1503 return 0;
1504 }
1505
1506 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1507 {
1508 char *s, *e, *macstr;
1509 int i;
1510
1511 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1512 if (IS_ERR(macstr))
1513 return PTR_ERR(macstr);
1514
1515 for (i = 0; i < ETH_ALEN; i++) {
1516 mac[i] = simple_strtoul(s, &e, 16);
1517 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1518 kfree(macstr);
1519 return -ENOENT;
1520 }
1521 s = e+1;
1522 }
1523
1524 kfree(macstr);
1525 return 0;
1526 }
1527
1528 static int setup_netfront_single(struct netfront_info *info)
1529 {
1530 int err;
1531
1532 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1533 if (err < 0)
1534 goto fail;
1535
1536 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1537 xennet_interrupt,
1538 0, info->netdev->name, info);
1539 if (err < 0)
1540 goto bind_fail;
1541 info->rx_evtchn = info->tx_evtchn;
1542 info->rx_irq = info->tx_irq = err;
1543
1544 return 0;
1545
1546 bind_fail:
1547 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1548 info->tx_evtchn = 0;
1549 fail:
1550 return err;
1551 }
1552
1553 static int setup_netfront_split(struct netfront_info *info)
1554 {
1555 int err;
1556
1557 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1558 if (err < 0)
1559 goto fail;
1560 err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1561 if (err < 0)
1562 goto alloc_rx_evtchn_fail;
1563
1564 snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1565 "%s-tx", info->netdev->name);
1566 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1567 xennet_tx_interrupt,
1568 0, info->tx_irq_name, info);
1569 if (err < 0)
1570 goto bind_tx_fail;
1571 info->tx_irq = err;
1572
1573 snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1574 "%s-rx", info->netdev->name);
1575 err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1576 xennet_rx_interrupt,
1577 0, info->rx_irq_name, info);
1578 if (err < 0)
1579 goto bind_rx_fail;
1580 info->rx_irq = err;
1581
1582 return 0;
1583
1584 bind_rx_fail:
1585 unbind_from_irqhandler(info->tx_irq, info);
1586 info->tx_irq = 0;
1587 bind_tx_fail:
1588 xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1589 info->rx_evtchn = 0;
1590 alloc_rx_evtchn_fail:
1591 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1592 info->tx_evtchn = 0;
1593 fail:
1594 return err;
1595 }
1596
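/*
 * Allocate and grant the shared tx/rx ring pages and bind the event
 * channel(s).  Split tx/rx event channels are used when the backend
 * advertises feature-split-event-channels, falling back to a single
 * channel otherwise.
 */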
1597 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1598 {
1599 struct xen_netif_tx_sring *txs;
1600 struct xen_netif_rx_sring *rxs;
1601 int err;
1602 struct net_device *netdev = info->netdev;
1603 unsigned int feature_split_evtchn;
1604
1605 info->tx_ring_ref = GRANT_INVALID_REF;
1606 info->rx_ring_ref = GRANT_INVALID_REF;
1607 info->rx.sring = NULL;
1608 info->tx.sring = NULL;
1609 netdev->irq = 0;
1610
1611 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1612 "feature-split-event-channels", "%u",
1613 &feature_split_evtchn);
1614 if (err < 0)
1615 feature_split_evtchn = 0;
1616
1617 err = xen_net_read_mac(dev, netdev->dev_addr);
1618 if (err) {
1619 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1620 goto fail;
1621 }
1622
1623 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1624 if (!txs) {
1625 err = -ENOMEM;
1626 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1627 goto fail;
1628 }
1629 SHARED_RING_INIT(txs);
1630 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1631
1632 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1633 if (err < 0)
1634 goto grant_tx_ring_fail;
1635
1636 info->tx_ring_ref = err;
1637 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1638 if (!rxs) {
1639 err = -ENOMEM;
1640 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1641 goto alloc_rx_ring_fail;
1642 }
1643 SHARED_RING_INIT(rxs);
1644 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1645
1646 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1647 if (err < 0)
1648 goto grant_rx_ring_fail;
1649 info->rx_ring_ref = err;
1650
1651 if (feature_split_evtchn)
1652 err = setup_netfront_split(info);
1653	/* Set up a single event channel if
1654	 * a) feature-split-event-channels == 0, or
1655	 * b) feature-split-event-channels == 1 but setting up split channels failed.
1656	 */
1657 if (!feature_split_evtchn || (feature_split_evtchn && err))
1658 err = setup_netfront_single(info);
1659
1660 if (err)
1661 goto alloc_evtchn_fail;
1662
1663 return 0;
1664
1665 /* If we fail to setup netfront, it is safe to just revoke access to
1666 * granted pages because backend is not accessing it at this point.
1667 */
1668 alloc_evtchn_fail:
1669 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1670 grant_rx_ring_fail:
1671 free_page((unsigned long)rxs);
1672 alloc_rx_ring_fail:
1673 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1674 grant_tx_ring_fail:
1675 free_page((unsigned long)txs);
1676 fail:
1677 return err;
1678 }
1679
1680 /* Common code used when first setting up, and when resuming. */
1681 static int talk_to_netback(struct xenbus_device *dev,
1682 struct netfront_info *info)
1683 {
1684 const char *message;
1685 struct xenbus_transaction xbt;
1686 int err;
1687
1688 /* Create shared ring, alloc event channel. */
1689 err = setup_netfront(dev, info);
1690 if (err)
1691 goto out;
1692
1693 again:
1694 err = xenbus_transaction_start(&xbt);
1695 if (err) {
1696 xenbus_dev_fatal(dev, err, "starting transaction");
1697 goto destroy_ring;
1698 }
1699
1700 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1701 info->tx_ring_ref);
1702 if (err) {
1703 message = "writing tx ring-ref";
1704 goto abort_transaction;
1705 }
1706 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1707 info->rx_ring_ref);
1708 if (err) {
1709 message = "writing rx ring-ref";
1710 goto abort_transaction;
1711 }
1712
1713 if (info->tx_evtchn == info->rx_evtchn) {
1714 err = xenbus_printf(xbt, dev->nodename,
1715 "event-channel", "%u", info->tx_evtchn);
1716 if (err) {
1717 message = "writing event-channel";
1718 goto abort_transaction;
1719 }
1720 } else {
1721 err = xenbus_printf(xbt, dev->nodename,
1722 "event-channel-tx", "%u", info->tx_evtchn);
1723 if (err) {
1724 message = "writing event-channel-tx";
1725 goto abort_transaction;
1726 }
1727 err = xenbus_printf(xbt, dev->nodename,
1728 "event-channel-rx", "%u", info->rx_evtchn);
1729 if (err) {
1730 message = "writing event-channel-rx";
1731 goto abort_transaction;
1732 }
1733 }
1734
1735 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1736 1);
1737 if (err) {
1738 message = "writing request-rx-copy";
1739 goto abort_transaction;
1740 }
1741
1742 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1743 if (err) {
1744 message = "writing feature-rx-notify";
1745 goto abort_transaction;
1746 }
1747
1748 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1749 if (err) {
1750 message = "writing feature-sg";
1751 goto abort_transaction;
1752 }
1753
1754 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1755 if (err) {
1756 message = "writing feature-gso-tcpv4";
1757 goto abort_transaction;
1758 }
1759
1760 err = xenbus_transaction_end(xbt, 0);
1761 if (err) {
1762 if (err == -EAGAIN)
1763 goto again;
1764 xenbus_dev_fatal(dev, err, "completing transaction");
1765 goto destroy_ring;
1766 }
1767
1768 return 0;
1769
1770 abort_transaction:
1771 xenbus_transaction_end(xbt, 1);
1772 xenbus_dev_fatal(dev, err, "%s", message);
1773 destroy_ring:
1774 xennet_disconnect_backend(info);
1775 out:
1776 return err;
1777 }
1778
1779 static int xennet_connect(struct net_device *dev)
1780 {
1781 struct netfront_info *np = netdev_priv(dev);
1782 int i, requeue_idx, err;
1783 struct sk_buff *skb;
1784 grant_ref_t ref;
1785 struct xen_netif_rx_request *req;
1786 unsigned int feature_rx_copy;
1787
1788 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1789 "feature-rx-copy", "%u", &feature_rx_copy);
1790 if (err != 1)
1791 feature_rx_copy = 0;
1792
1793 if (!feature_rx_copy) {
1794 dev_info(&dev->dev,
1795 "backend does not support copying receive path\n");
1796 return -ENODEV;
1797 }
1798
1799 err = talk_to_netback(np->xbdev, np);
1800 if (err)
1801 return err;
1802
1803 rtnl_lock();
1804 netdev_update_features(dev);
1805 rtnl_unlock();
1806
1807 spin_lock_bh(&np->rx_lock);
1808 spin_lock_irq(&np->tx_lock);
1809
1810 /* Step 1: Discard all pending TX packet fragments. */
1811 xennet_release_tx_bufs(np);
1812
1813 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1814 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1815 skb_frag_t *frag;
1816 const struct page *page;
1817 if (!np->rx_skbs[i])
1818 continue;
1819
1820 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1821 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1822 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1823
1824 frag = &skb_shinfo(skb)->frags[0];
1825 page = skb_frag_page(frag);
1826 gnttab_grant_foreign_access_ref(
1827 ref, np->xbdev->otherend_id,
1828 pfn_to_mfn(page_to_pfn(page)),
1829 0);
1830 req->gref = ref;
1831 req->id = requeue_idx;
1832
1833 requeue_idx++;
1834 }
1835
1836 np->rx.req_prod_pvt = requeue_idx;
1837
1838 /*
1839 * Step 3: All public and private state should now be sane. Get
1840 * ready to start sending and receiving packets and give the driver
1841 * domain a kick because we've probably just requeued some
1842 * packets.
1843 */
1844 netif_carrier_on(np->netdev);
1845 notify_remote_via_irq(np->tx_irq);
1846 if (np->tx_irq != np->rx_irq)
1847 notify_remote_via_irq(np->rx_irq);
1848 xennet_tx_buf_gc(dev);
1849 xennet_alloc_rx_buffers(dev);
1850
1851 spin_unlock_irq(&np->tx_lock);
1852 spin_unlock_bh(&np->rx_lock);
1853
1854 return 0;
1855 }
1856
1857 /**
1858 * Callback received when the backend's state changes.
1859 */
1860 static void netback_changed(struct xenbus_device *dev,
1861 enum xenbus_state backend_state)
1862 {
1863 struct netfront_info *np = dev_get_drvdata(&dev->dev);
1864 struct net_device *netdev = np->netdev;
1865
1866 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1867
1868 switch (backend_state) {
1869 case XenbusStateInitialising:
1870 case XenbusStateInitialised:
1871 case XenbusStateReconfiguring:
1872 case XenbusStateReconfigured:
1873 case XenbusStateUnknown:
1874 case XenbusStateClosed:
1875 break;
1876
1877 case XenbusStateInitWait:
1878 if (dev->state != XenbusStateInitialising)
1879 break;
1880 if (xennet_connect(netdev) != 0)
1881 break;
1882 xenbus_switch_state(dev, XenbusStateConnected);
1883 break;
1884
1885 case XenbusStateConnected:
1886 netdev_notify_peers(netdev);
1887 break;
1888
1889 case XenbusStateClosing:
1890 xenbus_frontend_closed(dev);
1891 break;
1892 }
1893 }
1894
1895 static const struct xennet_stat {
1896 char name[ETH_GSTRING_LEN];
1897 u16 offset;
1898 } xennet_stats[] = {
1899 {
1900 "rx_gso_checksum_fixup",
1901 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1902 },
1903 };
1904
1905 static int xennet_get_sset_count(struct net_device *dev, int string_set)
1906 {
1907 switch (string_set) {
1908 case ETH_SS_STATS:
1909 return ARRAY_SIZE(xennet_stats);
1910 default:
1911 return -EINVAL;
1912 }
1913 }
1914
1915 static void xennet_get_ethtool_stats(struct net_device *dev,
1916 struct ethtool_stats *stats, u64 * data)
1917 {
1918 void *np = netdev_priv(dev);
1919 int i;
1920
1921 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1922 data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1923 }
1924
1925 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1926 {
1927 int i;
1928
1929 switch (stringset) {
1930 case ETH_SS_STATS:
1931 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1932 memcpy(data + i * ETH_GSTRING_LEN,
1933 xennet_stats[i].name, ETH_GSTRING_LEN);
1934 break;
1935 }
1936 }
1937
1938 static const struct ethtool_ops xennet_ethtool_ops =
1939 {
1940 .get_link = ethtool_op_get_link,
1941
1942 .get_sset_count = xennet_get_sset_count,
1943 .get_ethtool_stats = xennet_get_ethtool_stats,
1944 .get_strings = xennet_get_strings,
1945 };
1946
1947 #ifdef CONFIG_SYSFS
1948 static ssize_t show_rxbuf_min(struct device *dev,
1949 struct device_attribute *attr, char *buf)
1950 {
1951 struct net_device *netdev = to_net_dev(dev);
1952 struct netfront_info *info = netdev_priv(netdev);
1953
1954 return sprintf(buf, "%u\n", info->rx_min_target);
1955 }
1956
1957 static ssize_t store_rxbuf_min(struct device *dev,
1958 struct device_attribute *attr,
1959 const char *buf, size_t len)
1960 {
1961 struct net_device *netdev = to_net_dev(dev);
1962 struct netfront_info *np = netdev_priv(netdev);
1963 char *endp;
1964 unsigned long target;
1965
1966 if (!capable(CAP_NET_ADMIN))
1967 return -EPERM;
1968
1969 target = simple_strtoul(buf, &endp, 0);
1970 if (endp == buf)
1971 return -EBADMSG;
1972
1973 if (target < RX_MIN_TARGET)
1974 target = RX_MIN_TARGET;
1975 if (target > RX_MAX_TARGET)
1976 target = RX_MAX_TARGET;
1977
1978 spin_lock_bh(&np->rx_lock);
1979 if (target > np->rx_max_target)
1980 np->rx_max_target = target;
1981 np->rx_min_target = target;
1982 if (target > np->rx_target)
1983 np->rx_target = target;
1984
1985 xennet_alloc_rx_buffers(netdev);
1986
1987 spin_unlock_bh(&np->rx_lock);
1988 return len;
1989 }
1990
1991 static ssize_t show_rxbuf_max(struct device *dev,
1992 struct device_attribute *attr, char *buf)
1993 {
1994 struct net_device *netdev = to_net_dev(dev);
1995 struct netfront_info *info = netdev_priv(netdev);
1996
1997 return sprintf(buf, "%u\n", info->rx_max_target);
1998 }
1999
2000 static ssize_t store_rxbuf_max(struct device *dev,
2001 struct device_attribute *attr,
2002 const char *buf, size_t len)
2003 {
2004 struct net_device *netdev = to_net_dev(dev);
2005 struct netfront_info *np = netdev_priv(netdev);
2006 char *endp;
2007 unsigned long target;
2008
2009 if (!capable(CAP_NET_ADMIN))
2010 return -EPERM;
2011
2012 target = simple_strtoul(buf, &endp, 0);
2013 if (endp == buf)
2014 return -EBADMSG;
2015
2016 if (target < RX_MIN_TARGET)
2017 target = RX_MIN_TARGET;
2018 if (target > RX_MAX_TARGET)
2019 target = RX_MAX_TARGET;
2020
2021 spin_lock_bh(&np->rx_lock);
2022 if (target < np->rx_min_target)
2023 np->rx_min_target = target;
2024 np->rx_max_target = target;
2025 if (target < np->rx_target)
2026 np->rx_target = target;
2027
2028 xennet_alloc_rx_buffers(netdev);
2029
2030 spin_unlock_bh(&np->rx_lock);
2031 return len;
2032 }
2033
2034 static ssize_t show_rxbuf_cur(struct device *dev,
2035 struct device_attribute *attr, char *buf)
2036 {
2037 struct net_device *netdev = to_net_dev(dev);
2038 struct netfront_info *info = netdev_priv(netdev);
2039
2040 return sprintf(buf, "%u\n", info->rx_target);
2041 }
2042
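/*
 * Tunables created on the netdev's device; they appear in the
 * interface's sysfs directory (e.g. /sys/class/net/<ifname>/rxbuf_min).
 */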
2043 static struct device_attribute xennet_attrs[] = {
2044 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2045 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2046 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2047 };
2048
2049 static int xennet_sysfs_addif(struct net_device *netdev)
2050 {
2051 int i;
2052 int err;
2053
2054 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2055 err = device_create_file(&netdev->dev,
2056 &xennet_attrs[i]);
2057 if (err)
2058 goto fail;
2059 }
2060 return 0;
2061
2062 fail:
2063 while (--i >= 0)
2064 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2065 return err;
2066 }
2067
2068 static void xennet_sysfs_delif(struct net_device *netdev)
2069 {
2070 int i;
2071
2072 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2073 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2074 }
2075
2076 #endif /* CONFIG_SYSFS */
2077
2078 static const struct xenbus_device_id netfront_ids[] = {
2079 { "vif" },
2080 { "" }
2081 };
2082
2083
2084 static int xennet_remove(struct xenbus_device *dev)
2085 {
2086 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2087
2088 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2089
2090 xennet_disconnect_backend(info);
2091
2092 xennet_sysfs_delif(info->netdev);
2093
2094 unregister_netdev(info->netdev);
2095
2096 del_timer_sync(&info->rx_refill_timer);
2097
2098 free_percpu(info->stats);
2099
2100 free_netdev(info->netdev);
2101
2102 return 0;
2103 }
2104
2105 static DEFINE_XENBUS_DRIVER(netfront, ,
2106 .probe = netfront_probe,
2107 .remove = xennet_remove,
2108 .resume = netfront_resume,
2109 .otherend_changed = netback_changed,
2110 );
2111
2112 static int __init netif_init(void)
2113 {
2114 if (!xen_domain())
2115 return -ENODEV;
2116
2117 if (xen_hvm_domain() && !xen_platform_pci_unplug)
2118 return -ENODEV;
2119
2120 pr_info("Initialising Xen virtual ethernet driver\n");
2121
2122 return xenbus_register_frontend(&netfront_driver);
2123 }
2124 module_init(netif_init);
2125
2126
2127 static void __exit netif_exit(void)
2128 {
2129 xenbus_unregister_driver(&netfront_driver);
2130 }
2131 module_exit(netif_exit);
2132
2133 MODULE_DESCRIPTION("Xen virtual network device frontend");
2134 MODULE_LICENSE("GPL");
2135 MODULE_ALIAS("xen:vif");
2136 MODULE_ALIAS("xennet");