/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static const struct ethtool_ops xennet_ethtool_ops;
/* Per-skb control block; pull_to is consumed on the receive path. */
struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};
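/*
 * Worked example of the tx_skbs[] freelist encoding above (an illustrative
 * sketch, not driver state): if slots 3 and 7 are free and tx_skb_freelist
 * == 3, then tx_skbs[3].link == 7 and slot 7 links to the next free index.
 * Small indices like 3 and 7 are far below PAGE_OFFSET, while any real
 * sk_buff pointer lies at or above PAGE_OFFSET (e.g. 0xffff8800... on
 * x86-64), so skb_entry_is_link() can tell the two apart with one compare.
 */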
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	atomic_t rx_gso_checksum_fixup;
};
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}
static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}
/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}
static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
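/*
 * Note: __CONST_RING_SIZE() yields a power-of-two entry count, so the
 * "idx & (NET_RX_RING_SIZE - 1)" mask above is equivalent to
 * "idx % NET_RX_RING_SIZE" without a division; e.g. with a 256-entry
 * ring, ring index 259 maps to slot 3.
 */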
static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}
static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif
static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);

	napi_schedule(&queue->napi);
}
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
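/*
 * Headroom note (summary of the check above): a worst-case packet may
 * need up to XEN_NETIF_NR_SLOTS_MIN ring slots, so a tx slot is only
 * reported as available when that many requests could still be posted
 * on top of the ones already in flight (req_prod_pvt - rsp_cons).
 */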
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb)
			break;

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page, 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(!netif_carrier_ok(queue->info->netdev))) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	wmb(); /* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
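/*
 * Refill strategy (summary): rather than re-arming per packet, the loop
 * above tops the rx ring up in one batch; if fewer than NET_RX_SLOTS_MIN
 * slots could be posted (e.g. an atomic allocation failed), the refill
 * timer retries roughly every HZ/10, i.e. about every 100 ms.
 */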
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}
static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	int more_to_do;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = queue->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);
}
struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx; /* Last request */
	unsigned int size;
};
static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id].skb = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	tx->id = id;
	tx->gref = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;

	info->tx = tx;
	info->size += tx->size;
}
static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct netfront_queue *queue, struct sk_buff *skb,
	struct page *page, unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.page = page,
		.size = 0,
	};

	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);

	return info.tx;
}
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);

	xennet_tx_setup_grant(gfn, offset, len, data);
}
static struct xen_netif_tx_request *xennet_make_txreqs(
	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
	struct sk_buff *skb, struct page *page,
	unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.tx = tx,
	};

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info.page = page;
		info.size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      &info);

		page++;
		offset = 0;
		len -= info.size;
	}

	return info.tx;
}
/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}
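/*
 * Slot-count example (illustrative, assuming 4 KiB pages): a linear area
 * of 3000 bytes starting at in-page offset 3000 ends at byte 6000, i.e.
 * it spans two pages and needs two grants/slots, which is what
 * gnttab_count_grant(3000, 3000) reports. Each frag is counted the same
 * way, so one compound-page frag can contribute several slots.
 */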
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
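/*
 * Two notes on the code around here:
 * - xennet_select_queue() above maps a flow to "skb_get_hash(skb) %
 *   num_queues", so all packets of one flow land on the same queue and
 *   per-flow ordering is preserved across the multi-queue ring pairs.
 * - Arithmetic check: with 4 KiB pages, MAX_XEN_SKB_FRAGS is
 *   65536 / 4096 + 1 = 17, i.e. enough slots for a 64 KiB TSO frame
 *   plus one slot for a misaligned start.
 */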
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *tx, *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 */
	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = skb_copy(skb, GFP_ATOMIC);
		if (!nskb)
			goto drop;
		dev_consume_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	first_tx = tx = xennet_make_first_txreq(queue, skb,
						page, offset, len);
	offset += tx->size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= tx->size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb,
					skb_frag_page(frag), frag->page_offset,
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}
*skb
,
865 struct xen_netif_extra_info
*gso
)
867 if (!gso
->u
.gso
.size
) {
869 pr_warn("GSO size must not be zero\n");
873 if (gso
->u
.gso
.type
!= XEN_NETIF_GSO_TYPE_TCPV4
&&
874 gso
->u
.gso
.type
!= XEN_NETIF_GSO_TYPE_TCPV6
) {
876 pr_warn("Bad GSO type %d\n", gso
->u
.gso
.type
);
880 skb_shinfo(skb
)->gso_size
= gso
->u
.gso
.size
;
881 skb_shinfo(skb
)->gso_type
=
882 (gso
->u
.gso
.type
== XEN_NETIF_GSO_TYPE_TCPV4
) ?
886 /* Header must be checked, and gso_segs computed. */
887 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
888 skb_shinfo(skb
)->gso_segs
= 0;
static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
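/*
 * Each fixup taken above is counted in rx_gso_checksum_fixup, which is
 * exported through the ethtool statistics (see xennet_stats[] below), so
 * a buggy backend peer can be spotted from inside the guest.
 */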
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(queue, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		queue->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}
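/*
 * NAPI contract (summary): xennet_poll() consumes at most 'budget'
 * responses per invocation; only when it finishes early does it call
 * napi_complete_done() and re-arm event delivery, re-scheduling itself
 * if more responses raced in meanwhile.
 */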
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}
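/*
 * The u64_stats begin/retry loops above are seqcount-style reads: on
 * 32-bit SMP kernels a hot-path writer may update the 64-bit counters
 * non-atomically, so the reader retries until it observes a consistent
 * snapshot; on 64-bit kernels the loops compile down to plain loads.
 */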
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&queue->tx_skbs[i]))
			continue;

		skb = queue->tx_skbs[i].skb;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}
static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}
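/*
 * Design note: with split event channels the backend signals TX
 * completions and RX traffic independently (the "DEVNAME-qN-tx" and
 * "DEVNAME-qN-rx" IRQs above), so an RX event does not needlessly run
 * the TX garbage collector. xennet_interrupt() is the combined handler
 * used when both directions share a single event channel.
 */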
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};
static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;
	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops = &xennet_netdev_ops;

	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	xenbus_switch_state(dev, XenbusStateInitialising);
	wait_event(module_load_q,
		   xenbus_read_driver_state(dev->otherend) !=
		   XenbusStateClosed &&
		   xenbus_read_driver_state(dev->otherend) !=
		   XenbusStateUnknown);
	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}
/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif

	return 0;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		del_timer_sync(&queue->rx_refill_timer);

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		if (netif_running(info->netdev))
			napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;
	}
}
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
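/*
 * Format example (illustrative value): the XenStore "mac" node holds a
 * colon-separated string such as "00:16:3e:12:34:56"; the loop above
 * parses one hex octet per iteration and rejects anything that is not
 * exactly six ':'-separated octets.
 */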
static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_interrupt,
					0, queue->info->netdev->name, queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt,
					0, queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt,
					0, queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	grant_ref_t gref;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, txs, 1, &gref);
	if (err < 0)
		goto grant_tx_ring_fail;
	queue->tx_ring_ref = gref;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0)
		goto grant_rx_ring_fail;
	queue->rx_ring_ref = gref;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(queue);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}
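/*
 * Ring setup (summary): each direction uses one shared page.
 * SHARED_RING_INIT() initialises the producer/consumer indices held in
 * the page, FRONT_RING_INIT() wraps it in the front-end ring state, and
 * xenbus_grant_ring() grants the page to the backend, returning the
 * grant reference that is later advertised through XenStore.
 */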
/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);

	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);

	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->info->netdev->name, queue->id);

	/* Initialise tx_skbs as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&queue->tx_skbs[i], i+1);
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

 exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
 exit:
	return err;
}
static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in per-queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}
*info
)
1739 for (i
= 0; i
< info
->netdev
->real_num_tx_queues
; i
++) {
1740 struct netfront_queue
*queue
= &info
->queues
[i
];
1742 if (netif_running(info
->netdev
))
1743 napi_disable(&queue
->napi
);
1744 netif_napi_del(&queue
->napi
);
1747 kfree(info
->queues
);
1748 info
->queues
= NULL
;
static int xennet_create_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	for (i = 0; i < *num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->xbdev->dev,
				 "only created %d queues\n", i);
			*num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, *num_queues);

	if (*num_queues == 0) {
		dev_err(&info->xbdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-split-event-channels", 0);

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out_unlocked;
	}

	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, &num_queues);
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "creating queues");
		kfree(info->queues);
		info->queues = NULL;
		goto out;
	}
	rtnl_unlock();

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err)
			goto destroy_ring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
 destroy_ring:
	xennet_disconnect_backend(info);
	rtnl_lock();
	xennet_destroy_queues(info);
 out:
	rtnl_unlock();
out_unlocked:
	device_unregister(&dev->dev);
	return err;
}
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	if (dev->reg_state == NETREG_UNINITIALIZED) {
		err = register_netdev(dev);
		if (err) {
			pr_warn("%s: register_netdev err=%d\n", __func__, err);
			device_unregister(&np->xbdev->dev);
			return err;
		}
	}

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}
/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
		break;

	case XenbusStateUnknown:
		wake_up_all(&module_unload_q);
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		wake_up_all(&module_unload_q);
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		wake_up_all(&module_unload_q);
		xenbus_frontend_closed(dev);
		break;
	}
}
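/*
 * State-machine note (summary): the frontend drives the connection when
 * the backend reaches InitWait, and treats Closing/Closed/Unknown as
 * teardown triggers; the module_unload_q wake-ups above pair with the
 * wait_event() calls in xennet_remove() below.
 */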
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}
static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
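/*
 * Usage example: inside the guest, "ethtool -S <iface>" lists the
 * rx_gso_checksum_fixup counter defined in xennet_stats[] above.
 */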
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}

static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
		xenbus_switch_state(dev, XenbusStateClosing);
		wait_event(module_unload_q,
			   xenbus_read_driver_state(dev->otherend) ==
			   XenbusStateClosing ||
			   xenbus_read_driver_state(dev->otherend) ==
			   XenbusStateUnknown);

		xenbus_switch_state(dev, XenbusStateClosed);
		wait_event(module_unload_q,
			   xenbus_read_driver_state(dev->otherend) ==
			   XenbusStateClosed ||
			   xenbus_read_driver_state(dev->otherend) ==
			   XenbusStateUnknown);
	}

	xennet_disconnect_backend(info);

	if (info->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(info->netdev);

	if (info->queues)
		xennet_destroy_queues(info);

	xennet_free_netdev(info->netdev);

	return 0;
}
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};
static int __init netif_init(void)
{
	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, but max. 8 if user has not
	 * specified a value.
	 */
	if (xennet_max_queues == 0)
		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
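/*
 * Usage example (assuming the driver is built as a module): the queue
 * count can be capped with "modprobe xen-netfront max_queues=4"; when
 * built in, the equivalent is expected to be the
 * "xen_netfront.max_queues=4" kernel command-line parameter.
 */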
static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");