/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static bool __read_mostly xennet_trusted = true;
module_param_named(trusted, xennet_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");

#define XENNET_TIMEOUT  (5 * HZ)
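/*
 * xennet_max_queues caps how many queues are actually negotiated with
 * the backend in talk_to_netback(); "trusted" (the module parameter, or
 * the per-device xenstore node read there) decides whether transmitted
 * data must be bounced through zeroed pages, see bounce_skb() below.
 */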
static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;
struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct bpf_prog __rcu *xdp_prog;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
	unsigned short tx_link[NET_TX_RING_SIZE];
#define TX_LINK_NONE 0xffff
#define TX_PENDING   0xfffe
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;
	unsigned int tx_pend_queue;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned int rx_rsp_unconsumed;
	spinlock_t rx_cons_lock;

	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
};
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is device behaving sane? */
	bool broken;

	/* Should skbs be bounced into a zeroed buffer? */
	bool bounce;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
/*
 * Access macros for acquiring freeing slots in tx_skbs[].
 */

static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}

static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
	unsigned int id = *head;

	if (id != TX_LINK_NONE) {
		*head = list[id];
		list[id] = TX_LINK_NONE;
	}
	return id;
}
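/*
 * Free tx_skbs[] slots are threaded into a singly-linked freelist via
 * tx_link[]: *head holds the first free index and each entry holds the
 * next one, terminated by TX_LINK_NONE.  TX_PENDING marks slots whose
 * requests are queued but not yet visible to the backend.
 */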
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
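/*
 * __CONST_RING_SIZE() always yields a power of two, so masking with
 * (NET_RX_RING_SIZE - 1) maps the free-running ring index onto an
 * array slot without a division.
 */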
static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}
static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);

	napi_schedule(&queue->napi);
}
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = page_pool_alloc_pages(queue->page_pool,
				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
	if (unlikely(!page)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	if (!np->queues || np->broken)
		return -ENODEV;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;
	bool work_done = false;
	const struct device *dev = &queue->info->netdev->dev;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
			dev_alert(dev, "Illegal number of responses %u\n",
				  prod - queue->tx.rsp_cons);
			goto err;
		}
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response txrsp;

			work_done = true;

			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
			if (txrsp.status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp.id;
			if (id >= RING_SIZE(&queue->tx)) {
				dev_alert(dev,
					  "Response has incorrect id (%u)\n",
					  id);
				goto err;
			}
			if (queue->tx_link[id] != TX_PENDING) {
				dev_alert(dev,
					  "Response for inactive request\n");
				goto err;
			}

			queue->tx_link[id] = TX_LINK_NONE;
			skb = queue->tx_skbs[id];
			queue->tx_skbs[id] = NULL;
			if (unlikely(!gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly))) {
				dev_alert(dev,
					  "Grant still in use by backend domain\n");
				goto err;
			}
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);

	return work_done;

 err:
	queue->info->broken = true;
	dev_alert(dev, "Disabled for further use\n");

	return work_done;
}
struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx;      /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request local copy */
	unsigned int size;
};
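/*
 * A single xennet_gnttab_make_txreq is threaded through the
 * gnttab_foreach_* helpers below; every grant-sized chunk of the skb
 * becomes one tx request, and tx/tx_local track the most recent request
 * so that flags such as XEN_NETTXF_more_data can be patched afterwards.
 */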
static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id] = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	/*
	 * Put the request in the pending queue, it will be set to be pending
	 * when the producer index is about to be raised.
	 */
	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

	info->tx = tx;
	info->size += info->tx_local.size;
}
static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info->tx;
}
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}
static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      info);

		page++;
		offset = 0;
		len -= info->size;
	}
}
/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}
static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
	unsigned int i;

	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
	       TX_LINK_NONE)
		queue->tx_link[i] = TX_PENDING;
}
static int xennet_xdp_xmit_one(struct net_device *dev,
			       struct netfront_queue *queue,
			       struct xdp_frame *xdpf)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = NULL,
		.page = virt_to_page(xdpf->data),
	};
	int notify;

	xennet_make_first_txreq(&info,
				offset_in_page(xdpf->data),
				xdpf->len);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += xdpf->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	xennet_tx_buf_gc(queue);

	return 0;
}
static int xennet_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_queue *queue = NULL;
	unsigned long irq_flags;
	int nxmit = 0;
	int i;

	if (unlikely(np->broken))
		return -ENODEV;
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = &np->queues[smp_processor_id() % num_queues];

	spin_lock_irqsave(&queue->tx_lock, irq_flags);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (!xdpf)
			continue;
		if (xennet_xdp_xmit_one(dev, queue, xdpf))
			break;
		nxmit++;
	}
	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);

	return nxmit;
}
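/*
 * When the backend is untrusted, the transmit path copies each skb into
 * freshly zeroed, page-aligned buffers before granting them, so the
 * backend can never observe unrelated data that happens to share a
 * page with the packet.
 */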
static struct sk_buff *bounce_skb(const struct sk_buff *skb)
{
	unsigned int headerlen = skb_headroom(skb);
	/* Align size to allocate full pages and avoid contiguous data leaks */
	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
				  XEN_PAGE_SIZE);
	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);

	if (!n)
		return NULL;

	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
		WARN_ONCE(1, "misaligned skb allocated\n");
		kfree_skb(n);
		return NULL;
	}

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	if (unlikely(np->broken))
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 *
	 * If the backend is not trusted bounce all data to zeroed pages to
	 * avoid exposing contiguous data on the granted page not belonging to
	 * the skb.
	 */
	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = bounce_skb(skb);
		if (!nskb)
			goto drop;
		dev_consume_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		xennet_make_txreqs(&info, skb_frag_page(frag),
				   skb_frag_off(frag),
				   skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	kfree(info->queues);
	info->queues = NULL;
}
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	xennet_destroy_queues(np);
}
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	queue->rx.rsp_cons = val;
	queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	xennet_set_rx_rsp_cons(queue, cons);
	return err;
}
static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
			  struct xen_netif_rx_response *rx, struct bpf_prog *prog,
			  struct xdp_buff *xdp, bool *need_xdp_flush)
{
	struct xdp_frame *xdpf;
	u32 len = rx->status;
	u32 act;
	int err;

	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
		      &queue->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
			 len, false);

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_TX:
		get_page(pdata);
		xdpf = xdp_convert_buff_to_frame(xdp);
		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
		if (unlikely(!err))
			xdp_return_frame_rx_napi(xdpf);
		else if (unlikely(err < 0))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_REDIRECT:
		get_page(pdata);
		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
		*need_xdp_flush = true;
		if (unlikely(err))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_PASS:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(queue->info->netdev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

	return act;
}
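/*
 * XDP_TX and XDP_REDIRECT hand the buffer onwards while the originating
 * ring slot is recycled, hence the extra get_page() taken on those two
 * paths above before the frame leaves our control.
 */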
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		if (!gnttab_end_foreign_access_ref(ref, 0)) {
			dev_alert(dev,
				  "Grant still in use by backend domain\n");
			queue->info->broken = true;
			dev_alert(dev, "Disabled for further use\n");
			return -EINVAL;
		}

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
						skb_frag_page(&skb_shinfo(skb)->frags[0]),
						rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
			} else {
				/* drop the frame */
				err = -EINVAL;
			}
		}
		rcu_read_unlock();

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
		rx = &rx_local;
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		xennet_set_rx_rsp_cons(queue, cons + slots);

	return err;
}
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static int xennet_fill_frags(struct netfront_queue *queue,
			     struct sk_buff *skb,
			     struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response rx;
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);

		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to < skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
			xennet_set_rx_rsp_cons(queue,
					       ++cons + skb_queue_len(list));
			kfree_skb(nskb);
			return -ENOENT;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx.offset, rx.status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	xennet_set_rx_rsp_cons(queue, cons);

	return 0;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;
	bool need_xdp_flush = false;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
		dev_alert(&dev->dev, "Illegal number of responses %u\n",
			  rp - queue->rx.rsp_cons);
		queue->info->broken = true;
		spin_unlock(&queue->rx_lock);
		return 0;
	}
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		RING_COPY_RESPONSE(&queue->rx, i, rx);
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
					   &need_xdp_flush);

		if (unlikely(err)) {
			if (queue->info->broken) {
				spin_unlock(&queue->rx_lock);
				return 0;
			}
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				xennet_set_rx_rsp_cons(queue,
						       queue->rx.rsp_cons +
						       skb_queue_len(&tmpq));
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
			goto err;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		i = queue->rx.rsp_cons + 1;
		xennet_set_rx_rsp_cons(queue, i);
		work_done++;
	}
	if (need_xdp_flush)
		xdp_do_flush();

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (!queue->tx_skbs[i])
			continue;

		skb = queue->tx_skbs[i];
		queue->tx_skbs[i] = NULL;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}
static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
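/*
 * The interrupt handlers below use Xen's lateeoi irq flow: the EOI is
 * flagged XEN_EOI_FLAG_SPURIOUS unless real work was found, letting the
 * hypervisor throttle event storms from a misbehaving backend.
 */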
static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->tx_lock, flags);
	if (xennet_tx_buf_gc(queue))
		*eoi = 0;
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return true;
}
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}
static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned int work_queued;
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
	if (work_queued > queue->rx_rsp_unconsumed) {
		queue->rx_rsp_unconsumed = work_queued;
		*eoi = 0;
	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
		const struct device *dev = &queue->info->netdev->dev;

		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
		dev_alert(dev, "RX producer index going backwards\n");
		dev_alert(dev, "Disabled for further use\n");
		queue->info->broken = true;
		return false;
	}
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);

	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
		napi_schedule(&queue->napi);

	return true;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (xennet_handle_tx(dev_id, &eoiflag) &&
	    xennet_handle_rx(dev_id, &eoiflag))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	if (info->broken)
		return;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif
#define NETBACK_XDP_HEADROOM_DISABLE	0
#define NETBACK_XDP_HEADROOM_ENABLE	1

static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
{
	int err;
	unsigned short headroom;

	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
			    "xdp-headroom", "%hu",
			    headroom);
	if (err)
		pr_warn("Error writing xdp-headroom\n");

	return err;
}
static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			  struct netlink_ext_ack *extack)
{
	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
	struct netfront_info *np = netdev_priv(dev);
	struct bpf_prog *old_prog;
	unsigned int i, err;

	if (dev->mtu > max_mtu) {
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
		return -EINVAL;
	}

	if (!np->netback_has_xdp_headroom)
		return 0;

	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);

	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
				  NETBACK_XDP_HEADROOM_DISABLE);
	if (err)
		return err;

	/* avoid the race with XDP headroom adjustment */
	wait_event(module_wq,
		   xenbus_read_driver_state(np->xbdev->otherend) ==
		   XenbusStateReconfigured);
	np->netfront_xdp_enabled = true;

	old_prog = rtnl_dereference(np->queues[0].xdp_prog);

	if (prog)
		bpf_prog_add(prog, dev->real_num_tx_queues);

	for (i = 0; i < dev->real_num_tx_queues; ++i)
		rcu_assign_pointer(np->queues[i].xdp_prog, prog);

	if (old_prog)
		for (i = 0; i < dev->real_num_tx_queues; ++i)
			bpf_prog_put(old_prog);

	xenbus_switch_state(np->xbdev, XenbusStateConnected);

	return 0;
}
static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct netfront_info *np = netdev_priv(dev);

	if (np->broken)
		return -ENODEV;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_uninit          = xennet_uninit,
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
	.ndo_bpf             = xennet_xdp,
	.ndo_xdp_xmit        = xennet_xdp_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};
static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops	= &xennet_netdev_ops;

	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;
	np->netfront_xdp_enabled = false;

	netif_carrier_off(netdev);

	do {
		xenbus_switch_state(dev, XenbusStateInitialising);
		err = wait_event_timeout(module_wq,
					 xenbus_read_driver_state(dev->otherend) !=
					 XenbusStateClosed &&
					 xenbus_read_driver_state(dev->otherend) !=
					 XenbusStateUnknown, XENNET_TIMEOUT);
	} while (!err);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif

	return 0;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		del_timer_sync(&queue->rx_refill_timer);

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		if (netif_running(info->netdev))
			napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;

		page_pool_destroy(queue->page_pool);
	}
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	netif_tx_lock_bh(info->netdev);
	netif_device_detach(info->netdev);
	netif_tx_unlock_bh(info->netdev);

	xennet_disconnect_backend(info);

	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);
	rtnl_unlock();

	return 0;
}
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
						xennet_interrupt, 0,
						queue->info->netdev->name,
						queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
						xennet_tx_interrupt, 0,
						queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
						xennet_rx_interrupt, 0,
						queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
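/*
 * Ring and event-channel setup for one queue follows.  Split event
 * channels (one for tx, one for rx) are preferred when the backend
 * offers them, since tx completions and rx traffic can then be handled
 * and IRQ-balanced independently; otherwise both directions share a
 * single channel bound to xennet_interrupt().
 */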
static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs = NULL;
	grant_ref_t gref;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, txs, 1, &gref);
	if (err < 0)
		goto fail;
	queue->tx_ring_ref = gref;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0)
		goto fail;
	queue->rx_ring_ref = gref;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || err)
		err = setup_netfront_single(queue);

	if (err)
		goto fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
 fail:
	if (queue->rx_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(queue->rx_ring_ref, 0,
					  (unsigned long)rxs);
		queue->rx_ring_ref = GRANT_INVALID_REF;
	} else {
		free_page((unsigned long)rxs);
	}
	if (queue->tx_ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(queue->tx_ring_ref, 0,
					  (unsigned long)txs);
		queue->tx_ring_ref = GRANT_INVALID_REF;
	} else {
		free_page((unsigned long)txs);
	}
	return err;
}
/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;
	char *devid;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);
	spin_lock_init(&queue->rx_cons_lock);

	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);

	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
		 devid, queue->id);

	/* Initialise tx_skb_freelist as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	queue->tx_pend_queue = TX_LINK_NONE;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		queue->tx_link[i] = i + 1;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}
	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

 exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
 exit:
	return err;
}
static int write_queue_xenstore_keys(struct netfront_queue *queue,
			   struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in a queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
				dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				"event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				"event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				"event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}
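/*
 * Resulting xenstore layout: with a single queue the keys sit directly
 * under the device node (e.g. "tx-ring-ref"); with multiple queues each
 * queue gets a "queue-N" subdirectory ("queue-0/tx-ring-ref", ...), as
 * selected by write_hierarchical above.
 */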
static int xennet_create_page_pool(struct netfront_queue *queue)
{
	int err;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = NET_RX_RING_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = &queue->info->netdev->dev,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
	};

	queue->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(queue->page_pool)) {
		err = PTR_ERR(queue->page_pool);
		queue->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
			       queue->id, 0);
	if (err) {
		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
		goto err_free_pp;
	}

	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, queue->page_pool);
	if (err) {
		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
		goto err_unregister_rxq;
	}
	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&queue->xdp_rxq);
err_free_pp:
	page_pool_destroy(queue->page_pool);
	queue->page_pool = NULL;
	return err;
}
static int xennet_create_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	for (i = 0; i < *num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->xbdev->dev,
				 "only created %d queues\n", i);
			*num_queues = i;
			break;
		}

		/* use page pool recycling instead of buddy allocator */
		ret = xennet_create_page_pool(queue);
		if (ret < 0) {
			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
			*num_queues = i;
			return ret;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, *num_queues);

	if (*num_queues == 0) {
		dev_err(&info->xbdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend is trusted. */
	info->bounce = !xennet_trusted ||
		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);

	/* Check if backend supports multiple queues. */
	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels. */
	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-split-event-channels", 0);

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out_unlocked;
	}

	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
							      "feature-xdp-headroom", 0);
	if (info->netback_has_xdp_headroom) {
		/* Set the current xen-netfront XDP state. */
		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
					  NETBACK_XDP_HEADROOM_ENABLE :
					  NETBACK_XDP_HEADROOM_DISABLE);
		if (err)
			goto out_unlocked;
	}

	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);

	/* For the case of a reconnect reset the "broken" indicator. */
	info->broken = false;

	err = xennet_create_queues(info, &num_queues);
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "creating queues");
		kfree(info->queues);
		info->queues = NULL;
		goto out;
	}
	rtnl_unlock();

	/* Create shared ring, alloc event channel -- for each queue. */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err)
			goto destroy_ring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues. */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue. */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific. */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
destroy_ring:
	xennet_disconnect_backend(info);
	rtnl_lock();
	xennet_destroy_queues(info);
out:
	rtnl_unlock();
out_unlocked:
	device_unregister(&dev->dev);
	return err;
}

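/*
 * For orientation only (paths illustrative; write_queue_xenstore_keys()
 * holds the authoritative per-queue key names): with num_queues == 2
 * the transaction above leaves the frontend's xenstore directory
 * looking roughly like
 *
 *	multi-queue-num-queues = "2"
 *	queue-0/tx-ring-ref, queue-0/rx-ring-ref, queue-0/event-channel-*
 *	queue-1/...
 *	request-rx-copy = "1"
 *	feature-sg = "1", feature-gso-tcpv4 = "1", ...
 */
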
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;
	if (np->netback_has_xdp_headroom)
		pr_info("backend supports XDP headroom\n");
	if (np->bounce)
		dev_info(&np->xbdev->dev,
			 "bouncing transmitted data to zeroed pages\n");

	/* talk_to_netback() sets the correct number of queues. */
	num_queues = dev->real_num_tx_queues;

	if (dev->reg_state == NETREG_UNINITIALIZED) {
		err = register_netdev(dev);
		if (err) {
			pr_warn("%s: register_netdev err=%d\n", __func__, err);
			device_unregister(&np->xbdev->dev);
			return err;
		}
	}

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane.  Get ready to
	 * start sending and receiving packets and give the driver domain a
	 * kick because we've probably just requeued some packets.
	 */
	netif_tx_lock_bh(np->netdev);
	netif_device_attach(np->netdev);
	netif_tx_unlock_bh(np->netdev);

	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}

/*
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	wake_up_all(&module_wq);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

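/*
 * Typical successful handshake as driven by the switch above (sketch):
 *
 *	backend -> InitWait:   frontend runs xennet_connect() and, on
 *	                       success, switches itself to Connected
 *	backend -> Connected:  frontend announces itself to peers via
 *	                       netdev_notify_peers()
 *	backend -> Closing:    frontend completes the shutdown through
 *	                       xenbus_frontend_closed()
 */
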
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
	.get_ts_info = ethtool_op_get_ts_info,
};

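/*
 * Usage example (illustrative shell session, interface name assumed):
 * the single private counter registered above is reported by ethtool:
 *
 *	# ethtool -S eth0
 *	NIC statistics:
 *	     rx_gso_checksum_fixup: 0
 */
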
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}

static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);

static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */

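/*
 * Illustrative only (the exact location depends on how the attribute
 * group is attached during probe): the attributes above typically
 * appear under the net device's sysfs directory, e.g.
 *
 *	$ cat /sys/class/net/<iface>/rxbuf_cur
 *	256
 *
 * where the value is NET_RX_RING_SIZE; rxbuf_min and rxbuf_max are
 * kept for compatibility and accept but ignore writes.
 */
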
static void xennet_bus_close(struct xenbus_device *dev)
{
	int ret;

	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		return;
	do {
		xenbus_switch_state(dev, XenbusStateClosing);
		ret = wait_event_timeout(module_wq,
				   xenbus_read_driver_state(dev->otherend) ==
				   XenbusStateClosing ||
				   xenbus_read_driver_state(dev->otherend) ==
				   XenbusStateClosed ||
				   xenbus_read_driver_state(dev->otherend) ==
				   XenbusStateUnknown,
				   XENNET_TIMEOUT);
	} while (!ret);

	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		return;

	do {
		xenbus_switch_state(dev, XenbusStateClosed);
		ret = wait_event_timeout(module_wq,
				   xenbus_read_driver_state(dev->otherend) ==
				   XenbusStateClosed ||
				   xenbus_read_driver_state(dev->otherend) ==
				   XenbusStateUnknown,
				   XENNET_TIMEOUT);
	} while (!ret);
}

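/*
 * Shutdown sequencing (a summary of the waits above): the frontend
 * first requests Closing and waits for the backend to follow, then
 * requests Closed and waits again; each wait_event_timeout() is simply
 * re-armed after XENNET_TIMEOUT until the backend reaches an expected
 * state or disappears (XenbusStateUnknown).
 */
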
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	xennet_bus_close(dev);
	xennet_disconnect_backend(info);

	if (info->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(info->netdev);

	if (info->queues) {
		rtnl_lock();
		xennet_destroy_queues(info);
		rtnl_unlock();
	}
	xennet_free_netdev(info->netdev);

	return 0;
}

static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, but at most 8, if the
	 * user has not specified a value.
	 */
	if (xennet_max_queues == 0)
		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

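/*
 * Load-time example (illustrative): both module parameters declared
 * near the top of this file can be set when loading the module, e.g.
 *
 *	# modprobe xen-netfront max_queues=4 trusted=0
 *
 * When max_queues is left at 0, the code above caps it at
 * min(MAX_QUEUES_DEFAULT, number of online CPUs).
 */
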
static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");