/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include <asm/xen/page.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD	256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
struct netfront_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */

	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}
/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}
static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}
static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
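/*
 * Refill the receive ring: allocate skbs and backing pages in a batch,
 * grant the backend access to each page, then push the requests onto the
 * shared ring and notify the backend only once for the whole batch.
 */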
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align the IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));

			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			break;
		}

		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn), 0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}
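/*
 * Reclaim completed transmit slots: walk the tx response ring, end the
 * foreign-access grants, return the grant references and slot ids to
 * their free lists, and free the associated skbs.
 */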
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
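/*
 * Queue extra tx requests for the remainder of an skb: split a header that
 * crosses page boundaries into page-sized chunks and grant the backend
 * read-only access to every page backing the skb's fragments, one ring
 * slot per page.
 */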
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&np->tx_skb_freelist,
						  np->tx_skbs);
			np->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&np->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			tx->gref = np->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	np->tx.req_prod_pvt = prod;
}
/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}
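/*
 * Transmit path: count the slots the skb needs, grant the backend access
 * to the linear header, fill in checksum/GSO metadata, hand the remaining
 * fragments to xennet_make_frags(), then push the requests and notify the
 * backend if it is waiting.
 */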
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->tx_irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			dev_warn(dev, "Invalid extra type: %d\n",
				 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
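/*
 * Consume one packet's worth of rx responses (plus any extra-info slots):
 * validate each response, end and release its grant, and queue the
 * per-slot skbs on 'list' for xennet_fill_frags() to assemble.
 */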
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			dev_warn(dev, "rx->offset: %x, size: %u\n",
				 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			dev_warn(dev, "Bad rx response id %d.\n",
				 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
		skb = xennet_get_rx_skb(np, cons + slots);
		ref = xennet_get_rx_ref(np, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + slots;

	return err;
}
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 : SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
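/*
 * Attach the follow-on response buffers queued on 'list' to the head skb
 * as page fragments, pulling data into the linear area first if the
 * fragment slots are exhausted.
 */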
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
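/*
 * Deliver the skbs collected by the poll loop: pull headers into the
 * linear area, set the protocol, fix up checksums, bump the rx counters
 * and hand each packet to GRO. Returns the number of packets dropped.
 */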
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&np->napi, skb);
	}

	return packets_dropped;
}
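/*
 * NAPI poll handler: consume up to 'budget' responses from the rx ring,
 * reassemble them into skbs (GSO extras and extra fragments included),
 * deliver them, adjust the refill target and re-arm the interrupt when
 * the ring is drained.
 */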
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_gro_flush(napi, false);

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id;
	grant_ref_t ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		skb_shinfo(skb)->nr_frags = 0;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			const struct page *page =
				skb_frag_page(&skb_shinfo(skb)->frags[0]);
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* Do all the remapping work and M2P updates. */
		MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
				 NULL, DOMID_SELF);
		mcl++;
		HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_IPV6_CSUM) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-ipv6-csum-offload", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_IPV6_CSUM;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_TSO6) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv6", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO6;
	}

	return features;
}
static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);
	xennet_tx_buf_gc(dev);
	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
		napi_schedule(&np->napi);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};
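/*
 * Allocate and initialise the net_device and its private netfront_info:
 * locks, refill timer, per-cpu stats, the tx skb freelist, the rx slot
 * table and a grant reference pool for each ring.
 */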
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = alloc_percpu(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	for_each_possible_cpu(i) {
		struct netfront_stats *xen_nf_stats;
		xen_nf_stats = per_cpu_ptr(np->stats, i);
		u64_stats_init(&xen_nf_stats->syncp);
	}

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops	= &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->tx_irq && (info->tx_irq == info->rx_irq))
		unbind_from_irqhandler(info->tx_irq, info);
	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
		unbind_from_irqhandler(info->tx_irq, info);
		unbind_from_irqhandler(info->rx_irq, info);
	}
	info->tx_evtchn = info->rx_evtchn = 0;
	info->tx_irq = info->rx_irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
static int setup_netfront_single(struct netfront_info *info)
{
	int err;

	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
					xennet_interrupt,
					0, info->netdev->name, info);
	if (err < 0)
		goto bind_fail;
	info->rx_evtchn = info->tx_evtchn;
	info->rx_irq = info->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
	info->tx_evtchn = 0;
fail:
	return err;
}
static int setup_netfront_split(struct netfront_info *info)
{
	int err;

	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
		 "%s-tx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
					xennet_tx_interrupt,
					0, info->tx_irq_name, info);
	if (err < 0)
		goto bind_tx_fail;
	info->tx_irq = err;

	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
		 "%s-rx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
					xennet_rx_interrupt,
					0, info->rx_irq_name, info);
	if (err < 0)
		goto bind_rx_fail;
	info->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(info->tx_irq, info);
	info->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
	info->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
	info->tx_evtchn = 0;
fail:
	return err;
}
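/*
 * Allocate and grant the shared tx/rx rings, read the backend's MAC
 * address, and set up either split or single event channels depending on
 * what the backend advertises.
 */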
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;
	unsigned int feature_split_evtchn;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto grant_tx_ring_fail;

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto grant_rx_ring_fail;
	info->rx_ring_ref = err;

	if (feature_split_evtchn)
		err = setup_netfront_split(info);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(info);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}

	if (info->tx_evtchn == info->rx_evtchn) {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-tx", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto abort_transaction;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-rx", "%u", info->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto abort_transaction;
		}
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
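/*
 * (Re)connect to the backend: verify it supports the copying receive
 * path, renegotiate features, requeue any rx buffers left over from a
 * previous instance and kick both rings back into life.
 */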
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		skb_frag_t *frag;
		const struct page *page;
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * buffers.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->tx_irq);
	if (np->tx_irq != np->rx_irq)
		notify_remote_via_irq(np->rx_irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}
/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
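/*
 * ethtool statistics: each entry maps a stat name to the offset of a
 * counter inside struct netfront_info.
 */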
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}
static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}
static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	unsigned long target;
	char *endp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}
static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	unsigned long target;
	char *endp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}
static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}
static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	del_timer_sync(&info->rx_refill_timer);

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}
static DEFINE_XENBUS_DRIVER(netfront, ,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
);
static int __init netif_init(void)
{
	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");