/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
struct netfront_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;

	struct u64_stats_sync	syncp;
};
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	unsigned int evtchn;
	struct xenbus_device *xbdev;

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}
static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}
/*
 * Access helpers for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}
static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
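/*
 * Example of the freelist encoding used above: with tx_skb_freelist == 3
 * and tx_skbs[3].link == 7, slot 3 is the first free entry and slot 7 the
 * next one.  A slot holding a real skb stores a pointer (>= PAGE_OFFSET)
 * in the same union, which is how skb_entry_is_link() tells the two apart.
 */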
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}
static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}
static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
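/*
 * netfront_tx_slot_available() keeps a headroom of roughly
 * MAX_SKB_FRAGS + 2 ring slots: a single skb may need one slot per
 * fragment plus extra slots for a linear header that crosses a page
 * boundary and for a GSO extra-info segment.
 */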
static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16 bytes boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		__skb_fill_page_desc(skb, 0, page, 0, 0);
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}
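/*
 * Reap transmit completions: walk the responses the backend has produced,
 * end the corresponding grant references, return the ring ids to the
 * freelist, free the skbs and then re-arm the response event.
 */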
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
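/*
 * Queue additional tx requests for the paged fragments of an skb (and for
 * any part of the linear header that spills past a page boundary).  Each
 * page gets its own grant reference and its own slot on the tx ring.
 */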
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= XEN_NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = skb_frag_size(frag);
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			dev_warn(dev, "Invalid extra type: %d\n",
				 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
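/*
 * Consume the chain of rx responses that make up one packet.  Each good
 * slot has its grant ended and its skb queued on 'list'; bad slots are
 * recycled back onto the ring via xennet_move_rx_slot().
 */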
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			dev_warn(dev, "rx->offset: %x, size: %u\n",
				 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			dev_warn(dev, "Bad rx response id %d.\n",
				 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		__skb_fill_page_desc(skb, nr_frags,
				     skb_frag_page(nfrag),
				     rx->offset, rx->status);

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}
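/*
 * Fix up checksum state for a received skb: packets flagged
 * XEN_NETRXF_csum_blank arrive as CHECKSUM_PARTIAL, so csum_start and
 * csum_offset must point at the TCP/UDP checksum field before the packet
 * is handed to the stack.
 */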
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		printk(KERN_ERR "Attempting to checksum a non-"
		       "TCP/UDP packet, dropping a protocol"
		       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}
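/*
 * NAPI poll handler: drain up to 'budget' responses from the rx ring,
 * reassemble multi-slot packets, queue good skbs for delivery through
 * handle_incoming_queue() and refill the ring before completing.
 */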
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.)  On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
		skb->len += skb->data_len;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
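/*
 * Fold the per-cpu counters into the rtnl_link_stats64 structure.  Each
 * per-cpu copy is read under its u64_stats_sync sequence counter so a
 * 64-bit value is never seen half-updated on 32-bit machines.
 */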
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (mfn == 0) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			const struct page *page =
				skb_frag_page(&skb_shinfo(skb)->frags[0]);
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* Do all the remapping work and M2P updates. */
		MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
				 NULL, DOMID_SELF);
		mcl++;
		HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	return features;
}
static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = alloc_percpu(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops = &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
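/*
 * Allocate and share the tx/rx ring pages with the backend, allocate an
 * event channel and bind it to xennet_interrupt().  The grant references
 * and event-channel number produced here are what talk_to_netback()
 * advertises in xenstore.
 */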
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					0, netdev->name, netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
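/*
 * Bring the device up against a (re)connected backend: verify that the
 * backend supports the copying receive path, renegotiate features, requeue
 * any receive buffers still held from before the reconnect and kick both
 * rings.
 */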
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		skb_frag_t *frag;
		const struct page *page;
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}
/*
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}
static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}
static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}
static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}
static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}
static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	del_timer_sync(&info->rx_refill_timer);

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}
static DEFINE_XENBUS_DRIVER(netfront, ,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
);
static int __init netif_init(void)
{
	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");