/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

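/*
 * Illustrative note (added for clarity, not part of the upstream file):
 * the parameter above is normally set at module load time, e.g.
 * "modprobe xen-netfront max_queues=4"; the 0644 mode also exposes it
 * under /sys/module/xen_netfront/parameters/max_queues.
 */
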
#define XENNET_TIMEOUT  (5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
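/*
 * Note (added for clarity): with the fixed 4 KiB Xen page size, both
 * __CONST_RING_SIZE() expressions above work out to 256 entries per ring.
 */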

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_wq);

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct bpf_prog __rcu *xdp_prog;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
	unsigned short tx_link[NET_TX_RING_SIZE];
#define TX_LINK_NONE 0xffff
#define TX_PENDING   0xfffe
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;
	unsigned int tx_pend_queue;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is device behaving sane? */
	bool broken;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring freeing slots in tx_skbs[].
 */

static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}

static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
	unsigned int id = *head;

	if (id != TX_LINK_NONE) {
		*head = list[id];
		list[id] = TX_LINK_NONE;
	}
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
	napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = page_pool_dev_alloc_pages(queue->page_pool);
	if (unlikely(!page)) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	if (!np->queues || np->broken)
		return -ENODEV;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;
	const struct device *dev = &queue->info->netdev->dev;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
			dev_alert(dev, "Illegal number of responses %u\n",
				  prod - queue->tx.rsp_cons);
			goto err;
		}
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response txrsp;

			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
			if (txrsp.status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp.id;
			if (id >= RING_SIZE(&queue->tx)) {
				dev_alert(dev,
					  "Response has incorrect id (%u)\n",
					  id);
				goto err;
			}
			if (queue->tx_link[id] != TX_PENDING) {
				dev_alert(dev,
					  "Response for inactive request\n");
				goto err;
			}

			queue->tx_link[id] = TX_LINK_NONE;
			skb = queue->tx_skbs[id];
			queue->tx_skbs[id] = NULL;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				dev_alert(dev,
					  "Grant still in use by backend domain\n");
				goto err;
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);

	return;

 err:
	queue->info->broken = true;
	dev_alert(dev, "Disabled for further use\n");
}

struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx; /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request local copy*/
	unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id] = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	/*
	 * Put the request in the pending queue, it will be set to be pending
	 * when the producer index is about to be raised.
	 */
	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

	info->tx = tx;
	info->size += info->tx_local.size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info->tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}

static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      info);

		page++;
		offset = 0;
		len -= info->size;
	}
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
	unsigned int i;

	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
	       TX_LINK_NONE)
		queue->tx_link[i] = TX_PENDING;
}

static int xennet_xdp_xmit_one(struct net_device *dev,
			       struct netfront_queue *queue,
			       struct xdp_frame *xdpf)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = NULL,
		.page = virt_to_page(xdpf->data),
	};
	int notify;

	xennet_make_first_txreq(&info,
				offset_in_page(xdpf->data),
				xdpf->len);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += xdpf->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	xennet_tx_buf_gc(queue);

	return 0;
}

static int xennet_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_queue *queue = NULL;
	unsigned long irq_flags;
	int nxmit = 0;
	int i;

	if (unlikely(np->broken))
		return -ENODEV;
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = &np->queues[smp_processor_id() % num_queues];

	spin_lock_irqsave(&queue->tx_lock, irq_flags);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (!xdpf)
			continue;
		if (xennet_xdp_xmit_one(dev, queue, xdpf))
			break;
		nxmit++;
	}
	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);

	return nxmit;
}


#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

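/*
 * Note (added for clarity): with the 4 KiB Xen page size this works out to
 * 65536 / 4096 + 1 = 17 slots, i.e. enough grants to cover a maximally
 * sized 64 KiB packet plus one extra slot because the data need not be
 * page-aligned.
 */
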
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	if (unlikely(np->broken))
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 */
	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = skb_copy(skb, GFP_ATOMIC);
		if (!nskb)
			goto drop;
		dev_consume_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		xennet_make_txreqs(&info, skb_frag_page(frag),
				   skb_frag_off(frag),
				   skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}

static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
			  struct xen_netif_rx_response *rx, struct bpf_prog *prog,
			  struct xdp_buff *xdp, bool *need_xdp_flush)
{
	struct xdp_frame *xdpf;
	u32 len = rx->status;
	u32 act;
	int err;

	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
		      &queue->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
			 len, false);

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_TX:
		get_page(pdata);
		xdpf = xdp_convert_buff_to_frame(xdp);
		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
		if (unlikely(!err))
			xdp_return_frame_rx_napi(xdpf);
		else if (unlikely(err < 0))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_REDIRECT:
		get_page(pdata);
		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
		*need_xdp_flush = true;
		if (unlikely(err))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_PASS:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(queue->info->netdev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

	return act;
}

static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	unsigned long ret;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
					skb_frag_page(&skb_shinfo(skb)->frags[0]),
					rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
			} else {
				/* drop the frame */
				err = -EINVAL;
			}
		}
		rcu_read_unlock();
next:
		__skb_queue_tail(list, skb);
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
		rx = &rx_local;
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static int xennet_fill_frags(struct netfront_queue *queue,
			     struct sk_buff *skb,
			     struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response rx;
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);

		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to < skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
			kfree_skb(nskb);
			return -ENOENT;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx.offset, rx.status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	queue->rx.rsp_cons = cons;

	return 0;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;
	bool need_xdp_flush = false;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
		dev_alert(&dev->dev, "Illegal number of responses %u\n",
			  rp - queue->rx.rsp_cons);
		queue->info->broken = true;
		spin_unlock(&queue->rx_lock);
		return 0;
	}
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		RING_COPY_RESPONSE(&queue->rx, i, rx);
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
					   &need_xdp_flush);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
			goto err;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		i = ++queue->rx.rsp_cons;
		work_done++;
	}
	if (need_xdp_flush)
		xdp_do_flush();

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}

static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (!queue->tx_skbs[i])
			continue;

		skb = queue->tx_skbs[i];
		queue->tx_skbs[i] = NULL;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}

static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	if (queue->info->broken)
		return IRQ_HANDLED;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (queue->info->broken)
		return IRQ_HANDLED;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	if (info->broken)
		return;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif

#define NETBACK_XDP_HEADROOM_DISABLE	0
#define NETBACK_XDP_HEADROOM_ENABLE	1

static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
{
	int err;
	unsigned short headroom;

	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
			    "xdp-headroom", "%hu",
			    headroom);
	if (err)
		pr_warn("Error writing xdp-headroom\n");

	return err;
}

static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			  struct netlink_ext_ack *extack)
{
	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
	struct netfront_info *np = netdev_priv(dev);
	struct bpf_prog *old_prog;
	unsigned int i, err;

	if (dev->mtu > max_mtu) {
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
		return -EINVAL;
	}

	if (!np->netback_has_xdp_headroom)
		return 0;

	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);

	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
				  NETBACK_XDP_HEADROOM_DISABLE);
	if (err)
		return err;

	/* avoid the race with XDP headroom adjustment */
	wait_event(module_wq,
		   xenbus_read_driver_state(np->xbdev->otherend) ==
		   XenbusStateReconfigured);
	np->netfront_xdp_enabled = true;

	old_prog = rtnl_dereference(np->queues[0].xdp_prog);

	if (prog)
		bpf_prog_add(prog, dev->real_num_tx_queues);

	for (i = 0; i < dev->real_num_tx_queues; ++i)
		rcu_assign_pointer(np->queues[i].xdp_prog, prog);

	if (old_prog)
		for (i = 0; i < dev->real_num_tx_queues; ++i)
			bpf_prog_put(old_prog);

	xenbus_switch_state(np->xbdev, XenbusStateConnected);

	return 0;
}

static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct netfront_info *np = netdev_priv(dev);

	if (np->broken)
		return -ENODEV;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
	.ndo_bpf            = xennet_xdp,
	.ndo_xdp_xmit	    = xennet_xdp_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops	= &xennet_netdev_ops;

	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;
	np->netfront_xdp_enabled = false;

	netif_carrier_off(netdev);

	do {
		xenbus_switch_state(dev, XenbusStateInitialising);
		err = wait_event_timeout(module_wq,
				 xenbus_read_driver_state(dev->otherend) !=
				 XenbusStateClosed &&
				 xenbus_read_driver_state(dev->otherend) !=
				 XenbusStateUnknown, XENNET_TIMEOUT);
	} while (!err);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif

	return 0;
}
1671
1672static void xennet_end_access(int ref, void *page)
1673{
1674 /* This frees the page as a side-effect */
1675 if (ref != GRANT_INVALID_REF)
1676 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1677}
1678
1679static void xennet_disconnect_backend(struct netfront_info *info)
1680{
2688fcb7 1681 unsigned int i = 0;
2688fcb7
AB
1682 unsigned int num_queues = info->netdev->real_num_tx_queues;
1683
f9feb1e6
DV
1684 netif_carrier_off(info->netdev);
1685
9a873c71 1686 for (i = 0; i < num_queues && info->queues; ++i) {
76541869
DV
1687 struct netfront_queue *queue = &info->queues[i];
1688
74470954
BO
1689 del_timer_sync(&queue->rx_refill_timer);
1690
2688fcb7
AB
1691 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1692 unbind_from_irqhandler(queue->tx_irq, queue);
1693 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1694 unbind_from_irqhandler(queue->tx_irq, queue);
1695 unbind_from_irqhandler(queue->rx_irq, queue);
1696 }
1697 queue->tx_evtchn = queue->rx_evtchn = 0;
1698 queue->tx_irq = queue->rx_irq = 0;
0d160211 1699
274b0455
CW
1700 if (netif_running(info->netdev))
1701 napi_synchronize(&queue->napi);
f9feb1e6 1702
a5b5dc3c
DV
1703 xennet_release_tx_bufs(queue);
1704 xennet_release_rx_bufs(queue);
1705 gnttab_free_grant_references(queue->gref_tx_head);
1706 gnttab_free_grant_references(queue->gref_rx_head);
1707
2688fcb7
AB
1708 /* End access and free the pages */
1709 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1710 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
0d160211 1711
2688fcb7
AB
1712 queue->tx_ring_ref = GRANT_INVALID_REF;
1713 queue->rx_ring_ref = GRANT_INVALID_REF;
1714 queue->tx.sring = NULL;
1715 queue->rx.sring = NULL;
6c5aa6fc
DK
1716
1717 page_pool_destroy(queue->page_pool);
2688fcb7 1718 }
0d160211
JF
1719}
1720
80708602 1721/*
0d160211
JF
1722 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1723 * driver restart. We tear down our netif structure and recreate it, but
1724 * leave the device-layer structures intact so that this is transparent to the
1725 * rest of the kernel.
1726 */
1727static int netfront_resume(struct xenbus_device *dev)
1728{
1b713e00 1729 struct netfront_info *info = dev_get_drvdata(&dev->dev);
0d160211
JF
1730
1731 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1732
1733 xennet_disconnect_backend(info);
1734 return 0;
1735}
1736
1737static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1738{
1739 char *s, *e, *macstr;
1740 int i;
1741
1742 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1743 if (IS_ERR(macstr))
1744 return PTR_ERR(macstr);
1745
1746 for (i = 0; i < ETH_ALEN; i++) {
1747 mac[i] = simple_strtoul(s, &e, 16);
1748 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1749 kfree(macstr);
1750 return -ENOENT;
1751 }
1752 s = e+1;
1753 }
1754
1755 kfree(macstr);
1756 return 0;
1757}
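/*
 * Illustrative sketch (not part of the driver): the xenstore "mac" node
 * holds a colon-separated string such as "00:16:3e:5e:6c:00" (example
 * value only). A standalone, userspace-style equivalent of the parse
 * loop above, assuming a NUL-terminated input string:
 */
#include <stdlib.h>

static int example_parse_mac(const char *s, unsigned char mac[6])
{
	char *e;
	int i;

	for (i = 0; i < 6; i++) {
		/* one hex byte, then ':' between bytes and '\0' after the last */
		mac[i] = (unsigned char)strtoul(s, &e, 16);
		if (s == e || *e != (i == 5 ? '\0' : ':'))
			return -1;	/* malformed, mirrors -ENOENT above */
		s = e + 1;
	}
	return 0;
}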
1758
2688fcb7 1759static int setup_netfront_single(struct netfront_queue *queue)
d634bf2c
WL
1760{
1761 int err;
1762
2688fcb7 1763 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
d634bf2c
WL
1764 if (err < 0)
1765 goto fail;
1766
2688fcb7 1767 err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
d634bf2c 1768 xennet_interrupt,
2688fcb7 1769 0, queue->info->netdev->name, queue);
d634bf2c
WL
1770 if (err < 0)
1771 goto bind_fail;
2688fcb7
AB
1772 queue->rx_evtchn = queue->tx_evtchn;
1773 queue->rx_irq = queue->tx_irq = err;
d634bf2c
WL
1774
1775 return 0;
1776
1777bind_fail:
2688fcb7
AB
1778 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1779 queue->tx_evtchn = 0;
d634bf2c
WL
1780fail:
1781 return err;
1782}
1783
2688fcb7 1784static int setup_netfront_split(struct netfront_queue *queue)
d634bf2c
WL
1785{
1786 int err;
1787
2688fcb7 1788 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
d634bf2c
WL
1789 if (err < 0)
1790 goto fail;
2688fcb7 1791 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
d634bf2c
WL
1792 if (err < 0)
1793 goto alloc_rx_evtchn_fail;
1794
2688fcb7
AB
1795 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1796 "%s-tx", queue->name);
1797 err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
d634bf2c 1798 xennet_tx_interrupt,
2688fcb7 1799 0, queue->tx_irq_name, queue);
d634bf2c
WL
1800 if (err < 0)
1801 goto bind_tx_fail;
2688fcb7 1802 queue->tx_irq = err;
d634bf2c 1803
2688fcb7
AB
1804 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1805 "%s-rx", queue->name);
1806 err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
d634bf2c 1807 xennet_rx_interrupt,
2688fcb7 1808 0, queue->rx_irq_name, queue);
d634bf2c
WL
1809 if (err < 0)
1810 goto bind_rx_fail;
2688fcb7 1811 queue->rx_irq = err;
d634bf2c
WL
1812
1813 return 0;
1814
1815bind_rx_fail:
2688fcb7
AB
1816 unbind_from_irqhandler(queue->tx_irq, queue);
1817 queue->tx_irq = 0;
d634bf2c 1818bind_tx_fail:
2688fcb7
AB
1819 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1820 queue->rx_evtchn = 0;
d634bf2c 1821alloc_rx_evtchn_fail:
2688fcb7
AB
1822 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1823 queue->tx_evtchn = 0;
d634bf2c
WL
1824fail:
1825 return err;
1826}
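/*
 * Note (illustrative): with split event channels each queue ends up with
 * two interrupt lines named after queue->name, e.g. "vif0-q0-tx" and
 * "vif0-q0-rx" as seen in /proc/interrupts (example names; see
 * xennet_init_queue() below for how queue->name is built).
 */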
1827
2688fcb7
AB
1828static int setup_netfront(struct xenbus_device *dev,
1829 struct netfront_queue *queue, unsigned int feature_split_evtchn)
0d160211
JF
1830{
1831 struct xen_netif_tx_sring *txs;
1832 struct xen_netif_rx_sring *rxs;
ccc9d90a 1833 grant_ref_t gref;
0d160211 1834 int err;
0d160211 1835
2688fcb7
AB
1836 queue->tx_ring_ref = GRANT_INVALID_REF;
1837 queue->rx_ring_ref = GRANT_INVALID_REF;
1838 queue->rx.sring = NULL;
1839 queue->tx.sring = NULL;
0d160211 1840
a144ff09 1841 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
0d160211
JF
1842 if (!txs) {
1843 err = -ENOMEM;
1844 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1845 goto fail;
1846 }
1847 SHARED_RING_INIT(txs);
30c5d7f0 1848 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
0d160211 1849
ccc9d90a 1850 err = xenbus_grant_ring(dev, txs, 1, &gref);
1ca2983a
WL
1851 if (err < 0)
1852 goto grant_tx_ring_fail;
ccc9d90a 1853 queue->tx_ring_ref = gref;
0d160211 1854
a144ff09 1855 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
0d160211
JF
1856 if (!rxs) {
1857 err = -ENOMEM;
1858 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1ca2983a 1859 goto alloc_rx_ring_fail;
0d160211
JF
1860 }
1861 SHARED_RING_INIT(rxs);
30c5d7f0 1862 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
0d160211 1863
ccc9d90a 1864 err = xenbus_grant_ring(dev, rxs, 1, &gref);
1ca2983a
WL
1865 if (err < 0)
1866 goto grant_rx_ring_fail;
ccc9d90a 1867 queue->rx_ring_ref = gref;
0d160211 1868
d634bf2c 1869 if (feature_split_evtchn)
2688fcb7 1870 err = setup_netfront_split(queue);
d634bf2c
WL
1871 /* Set up a single event channel if
1872 * a) feature-split-event-channels == 0
1873 * b) feature-split-event-channels == 1 but the split setup failed
1874 */
e93fac3b 1875 if (!feature_split_evtchn || err)
2688fcb7 1876 err = setup_netfront_single(queue);
d634bf2c 1877
0d160211 1878 if (err)
1ca2983a 1879 goto alloc_evtchn_fail;
0d160211 1880
0d160211
JF
1881 return 0;
1882
1ca2983a
WL
1883 /* If we fail to set up netfront, it is safe to just revoke access to
1884 * the granted pages because the backend is not accessing them at this point.
1885 */
1ca2983a 1886alloc_evtchn_fail:
2688fcb7 1887 gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1ca2983a
WL
1888grant_rx_ring_fail:
1889 free_page((unsigned long)rxs);
1890alloc_rx_ring_fail:
2688fcb7 1891 gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1ca2983a
WL
1892grant_tx_ring_fail:
1893 free_page((unsigned long)txs);
1894fail:
0d160211
JF
1895 return err;
1896}
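/*
 * Sketch of what the code above establishes per queue (one XEN_PAGE_SIZE
 * page per direction): SHARED_RING_INIT() initialises the shared
 * producer/consumer indices in the page, FRONT_RING_INIT() wraps it in
 * the frontend's private ring view (queue->tx / queue->rx), and
 * xenbus_grant_ring() returns the grant reference that is later written
 * to xenstore as {tx,rx}-ring-ref so the backend can map the same page.
 */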
1897
2688fcb7
AB
1898/* Queue-specific initialisation
1899 * This used to be done in xennet_create_dev() but must now
1900 * be run per-queue.
1901 */
1902static int xennet_init_queue(struct netfront_queue *queue)
1903{
1904 unsigned short i;
1905 int err = 0;
21f2706b 1906 char *devid;
2688fcb7
AB
1907
1908 spin_lock_init(&queue->tx_lock);
1909 spin_lock_init(&queue->rx_lock);
1910
e99e88a9 1911 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2688fcb7 1912
21f2706b
XL
1913 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1914 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1915 devid, queue->id);
8b715010 1916
21631d2d 1917 /* Initialise tx_skb_freelist as a free chain containing every entry. */
2688fcb7 1918 queue->tx_skb_freelist = 0;
a884daa6 1919 queue->tx_pend_queue = TX_LINK_NONE;
2688fcb7 1920 for (i = 0; i < NET_TX_RING_SIZE; i++) {
21631d2d 1921 queue->tx_link[i] = i + 1;
2688fcb7
AB
1922 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1923 queue->grant_tx_page[i] = NULL;
1924 }
21631d2d 1925 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2688fcb7
AB
1926
1927 /* Clear out rx_skbs */
1928 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1929 queue->rx_skbs[i] = NULL;
1930 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1931 }
1932
1933 /* A grant for every tx ring slot */
1f3c2eba 1934 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2688fcb7
AB
1935 &queue->gref_tx_head) < 0) {
1936 pr_alert("can't alloc tx grant refs\n");
1937 err = -ENOMEM;
1938 goto exit;
1939 }
1940
1941 /* A grant for every rx ring slot */
1f3c2eba 1942 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2688fcb7
AB
1943 &queue->gref_rx_head) < 0) {
1944 pr_alert("can't alloc rx grant refs\n");
1945 err = -ENOMEM;
1946 goto exit_free_tx;
1947 }
1948
2688fcb7
AB
1949 return 0;
1950
1951 exit_free_tx:
1952 gnttab_free_grant_references(queue->gref_tx_head);
1953 exit:
1954 return err;
1955}
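/*
 * Illustrative sketch (simplified, not part of the driver): tx slots are
 * kept on a singly linked free chain threaded through tx_link[], with
 * tx_skb_freelist as the head and TX_LINK_NONE as the terminator.
 * Claiming and releasing a slot then amounts to a pop/push on that list
 * (all names below are made up for the example):
 */
#define EXAMPLE_RING_SIZE	256
#define EXAMPLE_LINK_NONE	0xffff

static unsigned short example_link[EXAMPLE_RING_SIZE];
static unsigned short example_freelist;

static void example_init_freelist(void)
{
	unsigned short i;

	example_freelist = 0;
	for (i = 0; i < EXAMPLE_RING_SIZE; i++)
		example_link[i] = i + 1;
	example_link[EXAMPLE_RING_SIZE - 1] = EXAMPLE_LINK_NONE;
}

static int example_claim_slot(void)
{
	unsigned short id = example_freelist;

	if (id == EXAMPLE_LINK_NONE)
		return -1;			/* no free tx slots */
	example_freelist = example_link[id];	/* pop the head */
	return id;
}

static void example_release_slot(unsigned short id)
{
	example_link[id] = example_freelist;	/* push back onto the head */
	example_freelist = id;
}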
1956
50ee6061
AB
1957static int write_queue_xenstore_keys(struct netfront_queue *queue,
1958 struct xenbus_transaction *xbt, int write_hierarchical)
1959{
1960 /* Write the queue-specific keys into XenStore in the traditional
1961 * way for a single queue, or in per-queue subkeys for multiple
1962 * queues.
1963 */
1964 struct xenbus_device *dev = queue->info->xbdev;
1965 int err;
1966 const char *message;
1967 char *path;
1968 size_t pathsize;
1969
1970 /* Choose the correct place to write the keys */
1971 if (write_hierarchical) {
1972 pathsize = strlen(dev->nodename) + 10;
1973 path = kzalloc(pathsize, GFP_KERNEL);
1974 if (!path) {
1975 err = -ENOMEM;
1976 message = "out of memory while writing ring references";
1977 goto error;
1978 }
1979 snprintf(path, pathsize, "%s/queue-%u",
1980 dev->nodename, queue->id);
1981 } else {
1982 path = (char *)dev->nodename;
1983 }
1984
1985 /* Write ring references */
1986 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1987 queue->tx_ring_ref);
1988 if (err) {
1989 message = "writing tx-ring-ref";
1990 goto error;
1991 }
1992
1993 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1994 queue->rx_ring_ref);
1995 if (err) {
1996 message = "writing rx-ring-ref";
1997 goto error;
1998 }
1999
2000 /* Write event channels, taking into account both shared
2001 * and split event channel scenarios.
2002 */
2003 if (queue->tx_evtchn == queue->rx_evtchn) {
2004 /* Shared event channel */
2005 err = xenbus_printf(*xbt, path,
2006 "event-channel", "%u", queue->tx_evtchn);
2007 if (err) {
2008 message = "writing event-channel";
2009 goto error;
2010 }
2011 } else {
2012 /* Split event channels */
2013 err = xenbus_printf(*xbt, path,
2014 "event-channel-tx", "%u", queue->tx_evtchn);
2015 if (err) {
2016 message = "writing event-channel-tx";
2017 goto error;
2018 }
2019
2020 err = xenbus_printf(*xbt, path,
2021 "event-channel-rx", "%u", queue->rx_evtchn);
2022 if (err) {
2023 message = "writing event-channel-rx";
2024 goto error;
2025 }
2026 }
2027
2028 if (write_hierarchical)
2029 kfree(path);
2030 return 0;
2031
2032error:
2033 if (write_hierarchical)
2034 kfree(path);
2035 xenbus_dev_fatal(dev, err, "%s", message);
2036 return err;
2037}
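/*
 * Resulting xenstore layout (illustrative values, paths relative to the
 * frontend's nodename):
 *
 *   single queue, write_hierarchical == 0:
 *     tx-ring-ref = "768"
 *     rx-ring-ref = "769"
 *     event-channel = "17"                       (shared channel)
 *     (or event-channel-tx / event-channel-rx for split channels)
 *
 *   multiple queues, write_hierarchical == 1:
 *     queue-0/tx-ring-ref, queue-0/rx-ring-ref, queue-0/event-channel...
 *     queue-1/tx-ring-ref, ...
 */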
2038
ce58725f
DV
2039static void xennet_destroy_queues(struct netfront_info *info)
2040{
2041 unsigned int i;
2042
ce58725f
DV
2043 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
2044 struct netfront_queue *queue = &info->queues[i];
2045
2046 if (netif_running(info->netdev))
2047 napi_disable(&queue->napi);
2048 netif_napi_del(&queue->napi);
2049 }
2050
ce58725f
DV
2051 kfree(info->queues);
2052 info->queues = NULL;
2053}
2054
6c5aa6fc
DK
2055
2056
2057static int xennet_create_page_pool(struct netfront_queue *queue)
2058{
2059 int err;
2060 struct page_pool_params pp_params = {
2061 .order = 0,
2062 .flags = 0,
2063 .pool_size = NET_RX_RING_SIZE,
2064 .nid = NUMA_NO_NODE,
2065 .dev = &queue->info->netdev->dev,
2066 .offset = XDP_PACKET_HEADROOM,
2067 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2068 };
2069
2070 queue->page_pool = page_pool_create(&pp_params);
2071 if (IS_ERR(queue->page_pool)) {
2072 err = PTR_ERR(queue->page_pool);
2073 queue->page_pool = NULL;
2074 return err;
2075 }
2076
2077 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
b02e5a0e 2078 queue->id, 0);
6c5aa6fc
DK
2079 if (err) {
2080 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2081 goto err_free_pp;
2082 }
2083
2084 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2085 MEM_TYPE_PAGE_POOL, queue->page_pool);
2086 if (err) {
2087 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2088 goto err_unregister_rxq;
2089 }
2090 return 0;
2091
2092err_unregister_rxq:
2093 xdp_rxq_info_unreg(&queue->xdp_rxq);
2094err_free_pp:
2095 page_pool_destroy(queue->page_pool);
2096 queue->page_pool = NULL;
2097 return err;
2098}
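/*
 * Note (sketch): the rx refill path elsewhere in this file draws pages
 * from this pool instead of the page allocator, roughly
 *
 *	page = page_pool_alloc_pages(queue->page_pool,
 *				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 *
 * with XDP_PACKET_HEADROOM reserved at the start of each page via
 * pp_params.offset, so an XDP program can grow headers in place.
 */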
2099
ce58725f 2100static int xennet_create_queues(struct netfront_info *info,
ca88ea12 2101 unsigned int *num_queues)
ce58725f
DV
2102{
2103 unsigned int i;
2104 int ret;
2105
ca88ea12 2106 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
ce58725f
DV
2107 GFP_KERNEL);
2108 if (!info->queues)
2109 return -ENOMEM;
2110
ca88ea12 2111 for (i = 0; i < *num_queues; i++) {
ce58725f
DV
2112 struct netfront_queue *queue = &info->queues[i];
2113
2114 queue->id = i;
2115 queue->info = info;
2116
2117 ret = xennet_init_queue(queue);
2118 if (ret < 0) {
f599c64f 2119 dev_warn(&info->xbdev->dev,
69cb8524 2120 "only created %d queues\n", i);
ca88ea12 2121 *num_queues = i;
ce58725f
DV
2122 break;
2123 }
2124
6c5aa6fc
DK
2125 /* use page pool recycling instead of buddy allocator */
2126 ret = xennet_create_page_pool(queue);
2127 if (ret < 0) {
2128 dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2129 *num_queues = i;
2130 return ret;
2131 }
2132
ce58725f
DV
2133 netif_napi_add(queue->info->netdev, &queue->napi,
2134 xennet_poll, 64);
2135 if (netif_running(info->netdev))
2136 napi_enable(&queue->napi);
2137 }
2138
ca88ea12 2139 netif_set_real_num_tx_queues(info->netdev, *num_queues);
ce58725f 2140
ca88ea12 2141 if (*num_queues == 0) {
f599c64f 2142 dev_err(&info->xbdev->dev, "no queues\n");
ce58725f
DV
2143 return -EINVAL;
2144 }
2145 return 0;
2146}
2147
0d160211 2148/* Common code used when first setting up, and when resuming. */
f502bf2b 2149static int talk_to_netback(struct xenbus_device *dev,
0d160211
JF
2150 struct netfront_info *info)
2151{
2152 const char *message;
2153 struct xenbus_transaction xbt;
2154 int err;
2688fcb7
AB
2155 unsigned int feature_split_evtchn;
2156 unsigned int i = 0;
50ee6061 2157 unsigned int max_queues = 0;
2688fcb7
AB
2158 struct netfront_queue *queue = NULL;
2159 unsigned int num_queues = 1;
0d160211 2160
2688fcb7
AB
2161 info->netdev->irq = 0;
2162
50ee6061 2163 /* Check if backend supports multiple queues */
2890ea5c
JG
2164 max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2165 "multi-queue-max-queues", 1);
50ee6061
AB
2166 num_queues = min(max_queues, xennet_max_queues);
2167
2688fcb7 2168 /* Check feature-split-event-channels */
2890ea5c
JG
2169 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2170 "feature-split-event-channels", 0);
2688fcb7
AB
2171
2172 /* Read mac addr. */
2173 err = xen_net_read_mac(dev, info->netdev->dev_addr);
2174 if (err) {
2175 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
cb257783 2176 goto out_unlocked;
2688fcb7
AB
2177 }
2178
6c5aa6fc
DK
2179 info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2180 "feature-xdp-headroom", 0);
2181 if (info->netback_has_xdp_headroom) {
2182 /* set the current xen-netfront xdp state */
2183 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2184 NETBACK_XDP_HEADROOM_ENABLE :
2185 NETBACK_XDP_HEADROOM_DISABLE);
2186 if (err)
2187 goto out_unlocked;
2188 }
2189
f599c64f 2190 rtnl_lock();
ce58725f
DV
2191 if (info->queues)
2192 xennet_destroy_queues(info);
2193
a884daa6
JG
2194 /* For the case of a reconnect reset the "broken" indicator. */
2195 info->broken = false;
2196
ca88ea12 2197 err = xennet_create_queues(info, &num_queues);
e2e004ac
RL
2198 if (err < 0) {
2199 xenbus_dev_fatal(dev, err, "creating queues");
2200 kfree(info->queues);
2201 info->queues = NULL;
2202 goto out;
2203 }
f599c64f 2204 rtnl_unlock();
2688fcb7
AB
2205
2206 /* Create shared ring, alloc event channel -- for each queue */
2207 for (i = 0; i < num_queues; ++i) {
2208 queue = &info->queues[i];
2688fcb7 2209 err = setup_netfront(dev, queue, feature_split_evtchn);
e2e004ac
RL
2210 if (err)
2211 goto destroy_ring;
2688fcb7 2212 }
0d160211
JF
2213
2214again:
2215 err = xenbus_transaction_start(&xbt);
2216 if (err) {
2217 xenbus_dev_fatal(dev, err, "starting transaction");
2218 goto destroy_ring;
2219 }
2220
812494d9 2221 if (xenbus_exists(XBT_NIL,
2222 info->xbdev->otherend, "multi-queue-max-queues")) {
50ee6061 2223 /* Write the number of queues */
812494d9 2224 err = xenbus_printf(xbt, dev->nodename,
2225 "multi-queue-num-queues", "%u", num_queues);
d634bf2c 2226 if (err) {
50ee6061
AB
2227 message = "writing multi-queue-num-queues";
2228 goto abort_transaction_no_dev_fatal;
d634bf2c 2229 }
812494d9 2230 }
50ee6061 2231
812494d9 2232 if (num_queues == 1) {
2233 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2234 if (err)
2235 goto abort_transaction_no_dev_fatal;
2236 } else {
50ee6061
AB
2237 /* Write the keys for each queue */
2238 for (i = 0; i < num_queues; ++i) {
2239 queue = &info->queues[i];
2240 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2241 if (err)
2242 goto abort_transaction_no_dev_fatal;
d634bf2c 2243 }
0d160211
JF
2244 }
2245
50ee6061 2246 /* The remaining keys are not queue-specific */
0d160211
JF
2247 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2248 1);
2249 if (err) {
2250 message = "writing request-rx-copy";
2251 goto abort_transaction;
2252 }
2253
2254 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2255 if (err) {
2256 message = "writing feature-rx-notify";
2257 goto abort_transaction;
2258 }
2259
2260 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2261 if (err) {
2262 message = "writing feature-sg";
2263 goto abort_transaction;
2264 }
2265
2266 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2267 if (err) {
2268 message = "writing feature-gso-tcpv4";
2269 goto abort_transaction;
2270 }
2271
2c0057de
PD
2272 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2273 if (err) {
2274 message = "writing feature-gso-tcpv6";
2275 goto abort_transaction;
2276 }
2277
2278 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2279 "1");
2280 if (err) {
2281 message = "writing feature-ipv6-csum-offload";
2282 goto abort_transaction;
2283 }
2284
0d160211
JF
2285 err = xenbus_transaction_end(xbt, 0);
2286 if (err) {
2287 if (err == -EAGAIN)
2288 goto again;
2289 xenbus_dev_fatal(dev, err, "completing transaction");
2290 goto destroy_ring;
2291 }
2292
2293 return 0;
2294
2295 abort_transaction:
0d160211 2296 xenbus_dev_fatal(dev, err, "%s", message);
50ee6061
AB
2297abort_transaction_no_dev_fatal:
2298 xenbus_transaction_end(xbt, 1);
0d160211
JF
2299 destroy_ring:
2300 xennet_disconnect_backend(info);
f599c64f 2301 rtnl_lock();
e2e004ac 2302 xennet_destroy_queues(info);
0d160211 2303 out:
f599c64f 2304 rtnl_unlock();
cb257783 2305out_unlocked:
d86b5672 2306 device_unregister(&dev->dev);
0d160211
JF
2307 return err;
2308}
2309
0d160211
JF
2310static int xennet_connect(struct net_device *dev)
2311{
2312 struct netfront_info *np = netdev_priv(dev);
2688fcb7 2313 unsigned int num_queues = 0;
a5b5dc3c 2314 int err;
2688fcb7
AB
2315 unsigned int j = 0;
2316 struct netfront_queue *queue = NULL;
0d160211 2317
2890ea5c 2318 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
0d160211 2319 dev_info(&dev->dev,
898eb71c 2320 "backend does not support copying receive path\n");
0d160211
JF
2321 return -ENODEV;
2322 }
2323
f502bf2b 2324 err = talk_to_netback(np->xbdev, np);
0d160211
JF
2325 if (err)
2326 return err;
6c5aa6fc
DK
2327 if (np->netback_has_xdp_headroom)
2328 pr_info("backend supports XDP headroom\n");
0d160211 2329
2688fcb7
AB
2330 /* talk_to_netback() sets the correct number of queues */
2331 num_queues = dev->real_num_tx_queues;
2332
f599c64f
RL
2333 if (dev->reg_state == NETREG_UNINITIALIZED) {
2334 err = register_netdev(dev);
2335 if (err) {
2336 pr_warn("%s: register_netdev err=%d\n", __func__, err);
2337 device_unregister(&np->xbdev->dev);
2338 return err;
2339 }
2340 }
2341
45c8184c
RL
2342 rtnl_lock();
2343 netdev_update_features(dev);
2344 rtnl_unlock();
2345
0d160211 2346 /*
a5b5dc3c 2347 * All public and private state should now be sane. Get
0d160211
JF
2348 * ready to start sending and receiving packets and give the driver
2349 * domain a kick because we've probably just requeued some
2350 * packets.
2351 */
2352 netif_carrier_on(np->netdev);
2688fcb7
AB
2353 for (j = 0; j < num_queues; ++j) {
2354 queue = &np->queues[j];
f50b4076 2355
2688fcb7
AB
2356 notify_remote_via_irq(queue->tx_irq);
2357 if (queue->tx_irq != queue->rx_irq)
2358 notify_remote_via_irq(queue->rx_irq);
2688fcb7 2359
f50b4076
DV
2360 spin_lock_irq(&queue->tx_lock);
2361 xennet_tx_buf_gc(queue);
2688fcb7 2362 spin_unlock_irq(&queue->tx_lock);
f50b4076
DV
2363
2364 spin_lock_bh(&queue->rx_lock);
2365 xennet_alloc_rx_buffers(queue);
2688fcb7
AB
2366 spin_unlock_bh(&queue->rx_lock);
2367 }
0d160211
JF
2368
2369 return 0;
2370}
2371
80708602 2372/*
0d160211
JF
2373 * Callback received when the backend's state changes.
2374 */
f502bf2b 2375static void netback_changed(struct xenbus_device *dev,
0d160211
JF
2376 enum xenbus_state backend_state)
2377{
1b713e00 2378 struct netfront_info *np = dev_get_drvdata(&dev->dev);
0d160211
JF
2379 struct net_device *netdev = np->netdev;
2380
2381 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2382
8edfe2e9
JG
2383 wake_up_all(&module_wq);
2384
0d160211
JF
2385 switch (backend_state) {
2386 case XenbusStateInitialising:
2387 case XenbusStateInitialised:
b78c9512
NI
2388 case XenbusStateReconfiguring:
2389 case XenbusStateReconfigured:
0d160211 2390 case XenbusStateUnknown:
0d160211
JF
2391 break;
2392
2393 case XenbusStateInitWait:
2394 if (dev->state != XenbusStateInitialising)
2395 break;
2396 if (xennet_connect(netdev) != 0)
2397 break;
2398 xenbus_switch_state(dev, XenbusStateConnected);
08e34eb1
LE
2399 break;
2400
2401 case XenbusStateConnected:
ee89bab1 2402 netdev_notify_peers(netdev);
0d160211
JF
2403 break;
2404
bce3ea81
DV
2405 case XenbusStateClosed:
2406 if (dev->state == XenbusStateClosed)
2407 break;
df561f66 2408 fallthrough; /* Missed the backend's CLOSING state */
0d160211
JF
2409 case XenbusStateClosing:
2410 xenbus_frontend_closed(dev);
2411 break;
2412 }
2413}
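/*
 * Typical bring-up sequence as driven by the switch above (sketch): the
 * backend moves to InitWait, the frontend (still Initialising) reacts by
 * calling xennet_connect() and, on success, switches itself to Connected;
 * when the backend also reaches Connected the frontend notifies its peers
 * (gratuitous ARP) via netdev_notify_peers(). Teardown mirrors the
 * backend through Closing/Closed (see xennet_bus_close() below).
 */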
2414
e0ce4af9
IC
2415static const struct xennet_stat {
2416 char name[ETH_GSTRING_LEN];
2417 u16 offset;
2418} xennet_stats[] = {
2419 {
2420 "rx_gso_checksum_fixup",
2421 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2422 },
2423};
2424
2425static int xennet_get_sset_count(struct net_device *dev, int string_set)
2426{
2427 switch (string_set) {
2428 case ETH_SS_STATS:
2429 return ARRAY_SIZE(xennet_stats);
2430 default:
2431 return -EINVAL;
2432 }
2433}
2434
2435static void xennet_get_ethtool_stats(struct net_device *dev,
2436 struct ethtool_stats *stats, u64 * data)
2437{
2438 void *np = netdev_priv(dev);
2439 int i;
2440
2441 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2688fcb7 2442 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
e0ce4af9
IC
2443}
2444
2445static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2446{
2447 int i;
2448
2449 switch (stringset) {
2450 case ETH_SS_STATS:
2451 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2452 memcpy(data + i * ETH_GSTRING_LEN,
2453 xennet_stats[i].name, ETH_GSTRING_LEN);
2454 break;
2455 }
2456}
2457
0fc0b732 2458static const struct ethtool_ops xennet_ethtool_ops =
0d160211 2459{
0d160211 2460 .get_link = ethtool_op_get_link,
e0ce4af9
IC
2461
2462 .get_sset_count = xennet_get_sset_count,
2463 .get_ethtool_stats = xennet_get_ethtool_stats,
2464 .get_strings = xennet_get_strings,
91ffb9d3 2465 .get_ts_info = ethtool_op_get_ts_info,
0d160211
JF
2466};
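/*
 * Example (illustrative output): the single driver-private counter above
 * is reported by "ethtool -S <iface>" as
 *
 *   NIC statistics:
 *        rx_gso_checksum_fixup: 0
 */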
2467
2468#ifdef CONFIG_SYSFS
1f3c2eba
DV
2469static ssize_t show_rxbuf(struct device *dev,
2470 struct device_attribute *attr, char *buf)
0d160211 2471{
1f3c2eba 2472 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
0d160211
JF
2473}
2474
1f3c2eba
DV
2475static ssize_t store_rxbuf(struct device *dev,
2476 struct device_attribute *attr,
2477 const char *buf, size_t len)
0d160211 2478{
0d160211 2479 char *endp;
0d160211
JF
2480
2481 if (!capable(CAP_NET_ADMIN))
2482 return -EPERM;
2483
8ed7ec13 2484 simple_strtoul(buf, &endp, 0);
0d160211
JF
2485 if (endp == buf)
2486 return -EBADMSG;
2487
1f3c2eba 2488 /* rxbuf_min and rxbuf_max are no longer configurable. */
0d160211 2489
0d160211
JF
2490 return len;
2491}
2492
d61e4038
JP
2493static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2494static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2495static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
0d160211 2496
27b917e5
TI
2497static struct attribute *xennet_dev_attrs[] = {
2498 &dev_attr_rxbuf_min.attr,
2499 &dev_attr_rxbuf_max.attr,
2500 &dev_attr_rxbuf_cur.attr,
2501 NULL
2502};
0d160211 2503
27b917e5
TI
2504static const struct attribute_group xennet_dev_group = {
2505 .attrs = xennet_dev_attrs
2506};
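/*
 * These attributes show up under the netdev's sysfs directory, e.g.
 * /sys/class/net/<iface>/rxbuf_cur (illustrative path). All three report
 * the fixed NET_RX_RING_SIZE; writes to rxbuf_min and rxbuf_max are
 * accepted (CAP_NET_ADMIN required) but ignored, since rx buffering is
 * no longer tunable.
 */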
0d160211
JF
2507#endif /* CONFIG_SYSFS */
2508
c2c63310 2509static void xennet_bus_close(struct xenbus_device *dev)
0d160211 2510{
c2c63310 2511 int ret;
0d160211 2512
c2c63310
AR
2513 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2514 return;
2515 do {
5b5971df 2516 xenbus_switch_state(dev, XenbusStateClosing);
c2c63310
AR
2517 ret = wait_event_timeout(module_wq,
2518 xenbus_read_driver_state(dev->otherend) ==
2519 XenbusStateClosing ||
2520 xenbus_read_driver_state(dev->otherend) ==
2521 XenbusStateClosed ||
2522 xenbus_read_driver_state(dev->otherend) ==
2523 XenbusStateUnknown,
2524 XENNET_TIMEOUT);
2525 } while (!ret);
2526
2527 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2528 return;
5b5971df 2529
c2c63310 2530 do {
5b5971df 2531 xenbus_switch_state(dev, XenbusStateClosed);
c2c63310
AR
2532 ret = wait_event_timeout(module_wq,
2533 xenbus_read_driver_state(dev->otherend) ==
2534 XenbusStateClosed ||
2535 xenbus_read_driver_state(dev->otherend) ==
2536 XenbusStateUnknown,
2537 XENNET_TIMEOUT);
2538 } while (!ret);
2539}
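/*
 * Note: the two loops above form a two-step close handshake. The frontend
 * first requests Closing and waits (in XENNET_TIMEOUT slices) for the
 * backend to react, then requests Closed and waits again, so that
 * xennet_remove() only tears the rings down once the backend has stopped
 * or has disappeared (XenbusStateUnknown).
 */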
2540
2541static int xennet_remove(struct xenbus_device *dev)
2542{
2543 struct netfront_info *info = dev_get_drvdata(&dev->dev);
5b5971df 2544
c2c63310 2545 xennet_bus_close(dev);
0d160211
JF
2546 xennet_disconnect_backend(info);
2547
f599c64f
RL
2548 if (info->netdev->reg_state == NETREG_REGISTERED)
2549 unregister_netdev(info->netdev);
6bc96d04 2550
f599c64f
RL
2551 if (info->queues) {
2552 rtnl_lock();
9a873c71 2553 xennet_destroy_queues(info);
f599c64f
RL
2554 rtnl_unlock();
2555 }
900e1833 2556 xennet_free_netdev(info->netdev);
0d160211
JF
2557
2558 return 0;
2559}
2560
95afae48
DV
2561static const struct xenbus_device_id netfront_ids[] = {
2562 { "vif" },
2563 { "" }
2564};
2565
2566static struct xenbus_driver netfront_driver = {
2567 .ids = netfront_ids,
0d160211 2568 .probe = netfront_probe,
8e0e46bb 2569 .remove = xennet_remove,
0d160211 2570 .resume = netfront_resume,
f502bf2b 2571 .otherend_changed = netback_changed,
95afae48 2572};
0d160211
JF
2573
2574static int __init netif_init(void)
2575{
6e833587 2576 if (!xen_domain())
0d160211
JF
2577 return -ENODEV;
2578
51c71a3b 2579 if (!xen_has_pv_nic_devices())
b9136d20
IM
2580 return -ENODEV;
2581
383eda32 2582 pr_info("Initialising Xen virtual ethernet driver\n");
0d160211 2583
034702a6 2584 /* Allow as many queues as there are CPUs but max. 8 if user has not
32a84405
WL
2585 * specified a value.
2586 */
2587 if (xennet_max_queues == 0)
034702a6
JG
2588 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2589 num_online_cpus());
50ee6061 2590
ffb78a26 2591 return xenbus_register_frontend(&netfront_driver);
0d160211
JF
2592}
2593module_init(netif_init);
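/*
 * Usage note (assumption: the module is loaded as xen_netfront): the
 * queue cap above can be overridden at load time, e.g.
 *
 *   modprobe xen_netfront max_queues=4
 *
 * or from the kernel command line as xen_netfront.max_queues=4 when the
 * driver is built in.
 */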
2594
2595
2596static void __exit netif_exit(void)
2597{
ffb78a26 2598 xenbus_unregister_driver(&netfront_driver);
0d160211
JF
2599}
2600module_exit(netif_exit);
2601
2602MODULE_DESCRIPTION("Xen virtual network device frontend");
2603MODULE_LICENSE("GPL");
d2f0c52b 2604MODULE_ALIAS("xen:vif");
4f93f09b 2605MODULE_ALIAS("xennet");