/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
 * event channels are limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;
/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
static unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
static unsigned int rx_stall_timeout_jiffies;
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");
/*
 * This is the maximum slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
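/*
 * Illustrative usage note (not part of the original source): these module
 * parameters are normally set at load time. Assuming the module is loaded
 * as xen-netback, a typical (hypothetical) invocation might look like:
 *
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=4 \
 *            rx_drain_timeout_msecs=5000
 *
 * Parameters registered with mode 0644 (separate_tx_rx_irq, max_queues) can
 * typically also be changed at runtime via
 * /sys/module/xen_netback/parameters/.
 */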
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);

static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             s8 st);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                                      u16 id,
                                                      s8 st,
                                                      u16 offset,
                                                      u16 size,
                                                      u16 flags);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                       u16 idx)
{
    return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                         u16 idx)
{
    return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
    (vif->pending_tx_info[pending_idx].callback_struct)
/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
    u16 pending_idx = ubuf->desc;
    struct pending_tx_info *temp =
        container_of(ubuf, struct pending_tx_info, callback_struct);
    return container_of(temp - pending_idx,
                        struct xenvif_queue,
                        pending_tx_info[0]);
}
/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
#define PKT_PROT_LEN 128
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
    return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
    frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
    return i & (MAX_PENDING_REQS-1);
}
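/*
 * Illustrative note (not part of the original source): the mask above relies
 * on MAX_PENDING_REQS being a power of two, so free-running producer and
 * consumer counters simply wrap. For example, assuming MAX_PENDING_REQS == 256:
 *
 *   pending_index(255) == 255
 *   pending_index(256) == 0
 *   pending_index(300) == 44
 *
 * i.e. no explicit modulo or counter reset is ever needed.
 */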
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
    RING_IDX prod, cons;

    do {
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        if (prod - cons >= needed)
            return true;

        queue->rx.sring->req_event = prod + 1;

        /* Make sure event is visible before we check prod
         * again.
         */
        mb();
    } while (queue->rx.sring->req_prod != prod);

    return false;
}
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
    unsigned long flags;

    spin_lock_irqsave(&queue->rx_queue.lock, flags);

    __skb_queue_tail(&queue->rx_queue, skb);

    queue->rx_queue_len += skb->len;
    if (queue->rx_queue_len > queue->rx_queue_max)
        netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

    spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    spin_lock_irq(&queue->rx_queue.lock);

    skb = __skb_dequeue(&queue->rx_queue);
    if (skb)
        queue->rx_queue_len -= skb->len;

    spin_unlock_irq(&queue->rx_queue.lock);

    return skb;
}
static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
    spin_lock_irq(&queue->rx_queue.lock);

    if (queue->rx_queue_len < queue->rx_queue_max)
        netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

    spin_unlock_irq(&queue->rx_queue.lock);
}
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    while ((skb = xenvif_rx_dequeue(queue)) != NULL)
        kfree_skb(skb);
}
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    for (;;) {
        skb = skb_peek(&queue->rx_queue);
        if (!skb)
            break;
        if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
            break;
        xenvif_rx_dequeue(queue);
        kfree_skb(skb);
    }
}
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head,
                                bool full_coalesce)
{
    /* simple case: we have completely filled the current buffer. */
    if (offset == MAX_BUFFER_OFFSET)
        return true;

    /*
     * complex case: start a fresh buffer if the current frag
     * would overflow the current buffer but only if:
     *     (i)   this frag would fit completely in the next buffer
     * and (ii)  there is already some data in the current buffer
     * and (iii) this is not the head buffer.
     * and (iv)  there is no need to fully utilize the buffers
     *
     * Where:
     * - (i) stops us splitting a frag into two copies
     *   unless the frag is too large for a single buffer.
     * - (ii) stops us from leaving a buffer pointlessly empty.
     * - (iii) stops us leaving the first buffer
     *   empty. Strictly speaking this is already covered
     *   by (ii) but is explicitly checked because
     *   netfront relies on the first buffer being
     *   non-empty and can crash otherwise.
     * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
     *   slots.
     *
     * This means we will effectively linearise small
     * frags but do not needlessly split large buffers
     * into multiple copies, and tend to give large frags their
     * own buffers as before.
     */
    BUG_ON(size > MAX_BUFFER_OFFSET);
    if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
        !full_coalesce)
        return true;

    return false;
}
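/*
 * Illustrative examples (not part of the original source), assuming
 * MAX_BUFFER_OFFSET == PAGE_SIZE == 4096:
 *
 *   start_new_rx_buffer(4096, 100,  0, false) -> true   (buffer already full)
 *   start_new_rx_buffer(1000, 3500, 0, false) -> true   (frag would overflow,
 *                                                fits in the next buffer, and
 *                                                this is not the head buffer)
 *   start_new_rx_buffer(1000, 3500, 1, false) -> false  (head buffer is never
 *                                                cut short, rule (iii))
 *   start_new_rx_buffer(1000, 3500, 0, true)  -> false  (full coalescing was
 *                                                requested, rule (iv))
 */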
struct netrx_pending_operations {
    unsigned copy_prod, copy_cons;
    unsigned meta_prod, meta_cons;
    struct gnttab_copy *copy;
    struct xenvif_rx_meta *meta;
    int copy_off;
    grant_ref_t copy_gref;
};
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
                                                 struct netrx_pending_operations *npo)
{
    struct xenvif_rx_meta *meta;
    struct xen_netif_rx_request *req;

    req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

    meta = npo->meta + npo->meta_prod++;
    meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
    meta->gso_size = 0;
    meta->size = 0;
    meta->id = req->id;

    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
                                 unsigned long offset, int *head,
                                 struct xenvif_queue *foreign_queue,
                                 grant_ref_t foreign_gref)
{
    struct gnttab_copy *copy_gop;
    struct xenvif_rx_meta *meta;
    unsigned long bytes;
    int gso_type = XEN_NETIF_GSO_TYPE_NONE;

    /* Data must not cross a page boundary. */
    BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

    meta = npo->meta + npo->meta_prod - 1;

    /* Skip unused frames from start of page */
    page += offset >> PAGE_SHIFT;
    offset &= ~PAGE_MASK;

    while (size > 0) {
        BUG_ON(offset >= PAGE_SIZE);
        BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

        bytes = PAGE_SIZE - offset;
        if (bytes > size)
            bytes = size;

        if (start_new_rx_buffer(npo->copy_off,
                                bytes,
                                *head,
                                XENVIF_RX_CB(skb)->full_coalesce)) {
            /*
             * Netfront requires there to be some data in the head
             * buffer.
             */
            BUG_ON(*head);

            meta = get_next_rx_buffer(queue, npo);
        }

        if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
            bytes = MAX_BUFFER_OFFSET - npo->copy_off;

        copy_gop = npo->copy + npo->copy_prod++;
        copy_gop->flags = GNTCOPY_dest_gref;
        copy_gop->len = bytes;

        if (foreign_queue) {
            copy_gop->source.domid = foreign_queue->vif->domid;
            copy_gop->source.u.ref = foreign_gref;
            copy_gop->flags |= GNTCOPY_source_gref;
        } else {
            copy_gop->source.domid = DOMID_SELF;
            copy_gop->source.u.gmfn =
                virt_to_mfn(page_address(page));
        }
        copy_gop->source.offset = offset;

        copy_gop->dest.domid = queue->vif->domid;
        copy_gop->dest.offset = npo->copy_off;
        copy_gop->dest.u.ref = npo->copy_gref;

        npo->copy_off += bytes;
        meta->size += bytes;

        offset += bytes;
        size -= bytes;

        /* Next frame */
        if (offset == PAGE_SIZE && size) {
            BUG_ON(!PageCompound(page));
            page++;
            offset = 0;
        }

        /* Leave a gap for the GSO descriptor. */
        if (skb_is_gso(skb)) {
            if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
            else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
        }

        if (*head && ((1 << gso_type) & queue->vif->gso_mask))
            queue->rx.req_cons++;

        *head = 0; /* There must be something in this buffer now. */
    }
}
/*
 * Find the grant ref for a given frag in a chain of struct ubuf_info's
 * skb: the skb itself
 * i: the frag's number
 * ubuf: a pointer to an element in the chain. It should not be NULL
 *
 * Returns a pointer to the element in the chain where the page was found. If
 * not found, returns NULL.
 * See the definition of callback_struct in common.h for more details about
 * the chain.
 */
static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
                                                const int i,
                                                const struct ubuf_info *ubuf)
{
    struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);

    do {
        u16 pending_idx = ubuf->desc;

        if (skb_shinfo(skb)->frags[i].page.p ==
            foreign_queue->mmap_pages[pending_idx])
            break;
        ubuf = (struct ubuf_info *) ubuf->ctx;
    } while (ubuf);

    return ubuf;
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
                          struct netrx_pending_operations *npo,
                          struct xenvif_queue *queue)
{
    struct xenvif *vif = netdev_priv(skb->dev);
    int nr_frags = skb_shinfo(skb)->nr_frags;
    int i;
    struct xen_netif_rx_request *req;
    struct xenvif_rx_meta *meta;
    unsigned char *data;
    int head = 1;
    int old_meta_prod;
    int gso_type;
    const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
    const struct ubuf_info *const head_ubuf = ubuf;

    old_meta_prod = npo->meta_prod;

    gso_type = XEN_NETIF_GSO_TYPE_NONE;
    if (skb_is_gso(skb)) {
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
            gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
    }

    /* Set up a GSO prefix descriptor, if necessary */
    if ((1 << gso_type) & vif->gso_prefix_mask) {
        req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
        meta->gso_type = gso_type;
        meta->gso_size = skb_shinfo(skb)->gso_size;
        meta->size = 0;
        meta->id = req->id;
    }

    req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
    meta = npo->meta + npo->meta_prod++;

    if ((1 << gso_type) & vif->gso_mask) {
        meta->gso_type = gso_type;
        meta->gso_size = skb_shinfo(skb)->gso_size;
    } else {
        meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
        meta->gso_size = 0;
    }

    meta->size = 0;
    meta->id = req->id;
    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    data = skb->data;
    while (data < skb_tail_pointer(skb)) {
        unsigned int offset = offset_in_page(data);
        unsigned int len = PAGE_SIZE - offset;

        if (data + len > skb_tail_pointer(skb))
            len = skb_tail_pointer(skb) - data;

        xenvif_gop_frag_copy(queue, skb, npo,
                             virt_to_page(data), len, offset, &head,
                             NULL,
                             0);
        data += len;
    }

    for (i = 0; i < nr_frags; i++) {
        /* This variable also signals whether foreign_gref has a real
         * value or not.
         */
        struct xenvif_queue *foreign_queue = NULL;
        grant_ref_t foreign_gref;

        if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
            (ubuf->callback == &xenvif_zerocopy_callback)) {
            const struct ubuf_info *const startpoint = ubuf;

            /* Ideally ubuf points to the chain element which
             * belongs to this frag. Or if frags were removed from
             * the beginning, then shortly before it.
             */
            ubuf = xenvif_find_gref(skb, i, ubuf);

            /* Try again from the beginning of the list, if we
             * haven't tried from there. This only makes sense in
             * the unlikely event of reordering the original frags.
             * For injected local pages it's an unnecessary second
             * search.
             */
            if (unlikely(!ubuf) && startpoint != head_ubuf)
                ubuf = xenvif_find_gref(skb, i, head_ubuf);

            if (likely(ubuf)) {
                u16 pending_idx = ubuf->desc;

                foreign_queue = ubuf_to_queue(ubuf);
                foreign_gref =
                    foreign_queue->pending_tx_info[pending_idx].req.gref;
                /* Just a safety measure. If this was the last
                 * element on the list, the for loop will
                 * iterate again if a local page were added to
                 * the end. Using head_ubuf here prevents the
                 * second search on the chain. Or the original
                 * frags changed order, but that's less likely.
                 * In any way, ubuf shouldn't be NULL.
                 */
                ubuf = ubuf->ctx ?
                    (struct ubuf_info *) ubuf->ctx :
                    head_ubuf;
            } else
                /* This frag was a local page, added to the
                 * array after the skb left netback.
                 */
                ubuf = head_ubuf;
        }
        xenvif_gop_frag_copy(queue, skb, npo,
                             skb_frag_page(&skb_shinfo(skb)->frags[i]),
                             skb_frag_size(&skb_shinfo(skb)->frags[i]),
                             skb_shinfo(skb)->frags[i].page_offset,
                             &head,
                             foreign_queue,
                             foreign_queue ? foreign_gref : UINT_MAX);
    }

    return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
                            struct netrx_pending_operations *npo)
{
    struct gnttab_copy *copy_op;
    int status = XEN_NETIF_RSP_OKAY;
    int i;

    for (i = 0; i < nr_meta_slots; i++) {
        copy_op = npo->copy + npo->copy_cons++;
        if (copy_op->status != GNTST_okay) {
            netdev_dbg(vif->dev,
                       "Bad status %d from copy to DOM%d.\n",
                       copy_op->status, vif->domid);
            status = XEN_NETIF_RSP_ERROR;
        }
    }

    return status;
}
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
                                      struct xenvif_rx_meta *meta,
                                      int nr_meta_slots)
{
    int i;
    unsigned long offset;

    /* No fragments used */
    if (nr_meta_slots <= 1)
        return;

    nr_meta_slots--;

    for (i = 0; i < nr_meta_slots; i++) {
        int flags;
        if (i == nr_meta_slots - 1)
            flags = 0;
        else
            flags = XEN_NETRXF_more_data;

        offset = 0;
        make_rx_response(queue, meta[i].id, status, offset,
                         meta[i].size, flags);
    }
}
void xenvif_kick_thread(struct xenvif_queue *queue)
{
    wake_up(&queue->wq);
}
static void xenvif_rx_action(struct xenvif_queue *queue)
{
    s8 status;
    u16 flags;
    struct xen_netif_rx_response *resp;
    struct sk_buff_head rxq;
    struct sk_buff *skb;
    int ret;
    unsigned long offset;
    bool need_to_notify = false;

    struct netrx_pending_operations npo = {
        .copy  = queue->grant_copy_op,
        .meta  = queue->meta,
    };

    skb_queue_head_init(&rxq);

    while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
           && (skb = xenvif_rx_dequeue(queue)) != NULL) {
        RING_IDX max_slots_needed;
        RING_IDX old_req_cons;
        RING_IDX ring_slots_used;
        int i;

        queue->last_rx_time = jiffies;

        /* We need a cheap worst-case estimate for the number of
         * ring slots we'll use.
         */
        max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
                                        skb_headlen(skb),
                                        PAGE_SIZE);
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            unsigned int size;
            unsigned int offset;

            size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
            offset = skb_shinfo(skb)->frags[i].page_offset;

            /* For a worst-case estimate we need to factor in
             * the fragment page offset as this will affect the
             * number of times xenvif_gop_frag_copy() will
             * call start_new_rx_buffer().
             */
            max_slots_needed += DIV_ROUND_UP(offset + size,
                                             PAGE_SIZE);
        }

        /* To avoid the estimate becoming too pessimal for some
         * frontends that limit posted rx requests, cap the estimate
         * at MAX_SKB_FRAGS. In this case netback will fully coalesce
         * the skb into the provided slots.
         */
        if (max_slots_needed > MAX_SKB_FRAGS) {
            max_slots_needed = MAX_SKB_FRAGS;
            XENVIF_RX_CB(skb)->full_coalesce = true;
        } else {
            XENVIF_RX_CB(skb)->full_coalesce = false;
        }

        /* We may need one more slot for GSO metadata */
        if (skb_is_gso(skb) &&
            (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
             skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
            max_slots_needed++;

        old_req_cons = queue->rx.req_cons;
        XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
        ring_slots_used = queue->rx.req_cons - old_req_cons;

        BUG_ON(ring_slots_used > max_slots_needed);

        __skb_queue_tail(&rxq, skb);
    }

    BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

    if (!npo.copy_prod)
        goto done;

    BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
    gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

    while ((skb = __skb_dequeue(&rxq)) != NULL) {

        if ((1 << queue->meta[npo.meta_cons].gso_type) &
            queue->vif->gso_prefix_mask) {
            resp = RING_GET_RESPONSE(&queue->rx,
                                     queue->rx.rsp_prod_pvt++);

            resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

            resp->offset = queue->meta[npo.meta_cons].gso_size;
            resp->id = queue->meta[npo.meta_cons].id;
            resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

            npo.meta_cons++;
            XENVIF_RX_CB(skb)->meta_slots_used--;
        }

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        status = xenvif_check_gop(queue->vif,
                                  XENVIF_RX_CB(skb)->meta_slots_used,
                                  &npo);

        if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
            flags = 0;
        else
            flags = XEN_NETRXF_more_data;

        if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
            flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
            /* remote but checksummed. */
            flags |= XEN_NETRXF_data_validated;

        offset = 0;
        resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
                                status, offset,
                                queue->meta[npo.meta_cons].size,
                                flags);

        if ((1 << queue->meta[npo.meta_cons].gso_type) &
            queue->vif->gso_mask) {
            struct xen_netif_extra_info *gso =
                (struct xen_netif_extra_info *)
                RING_GET_RESPONSE(&queue->rx,
                                  queue->rx.rsp_prod_pvt++);

            resp->flags |= XEN_NETRXF_extra_info;

            gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
            gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
            gso->u.gso.pad = 0;
            gso->u.gso.features = 0;

            gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
            gso->flags = 0;
        }

        xenvif_add_frag_responses(queue, status,
                                  queue->meta + npo.meta_cons + 1,
                                  XENVIF_RX_CB(skb)->meta_slots_used);

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

        need_to_notify |= !!ret;

        npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
        dev_kfree_skb(skb);
    }

done:
    if (need_to_notify)
        notify_remote_via_irq(queue->rx_irq);
}
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
    int more_to_do;

    RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

    if (more_to_do)
        napi_schedule(&queue->napi);
}
static void tx_add_credit(struct xenvif_queue *queue)
{
    unsigned long max_burst, max_credit;

    /*
     * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
     * Otherwise the interface can seize up due to insufficient credit.
     */
    max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
    max_burst = min(max_burst, 131072UL);
    max_burst = max(max_burst, queue->credit_bytes);

    /* Take care that adding a new chunk of credit doesn't wrap to zero. */
    max_credit = queue->remaining_credit + queue->credit_bytes;
    if (max_credit < queue->remaining_credit)
        max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

    queue->remaining_credit = min(max_credit, max_burst);
}
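/*
 * Illustrative worked example (not part of the original source): with
 * credit_bytes == 100000, remaining_credit == 60000 and a pending request of
 * size 9000, the code above computes
 *
 *   max_burst  = max(min(9000, 131072), 100000) = 100000
 *   max_credit = 60000 + 100000                 = 160000   (no wrap)
 *   remaining_credit = min(160000, 100000)      = 100000
 *
 * i.e. credit is topped up by credit_bytes, but a single refill is capped so
 * it never exceeds the permitted burst.
 */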
static void tx_credit_callback(unsigned long data)
{
    struct xenvif_queue *queue = (struct xenvif_queue *)data;
    tx_add_credit(queue);
    xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *txp, RING_IDX end)
{
    RING_IDX cons = queue->tx.req_cons;
    unsigned long flags;

    do {
        spin_lock_irqsave(&queue->response_lock, flags);
        make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
        spin_unlock_irqrestore(&queue->response_lock, flags);
        if (cons == end)
            break;
        txp = RING_GET_REQUEST(&queue->tx, cons++);
    } while (1);
    queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
    netdev_err(vif->dev, "fatal error; disabling device\n");
    vif->disabled = true;
    /* Disable the vif from queue 0's kthread */
    if (vif->queues)
        xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
                                 struct xen_netif_tx_request *first,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
{
    RING_IDX cons = queue->tx.req_cons;
    int slots = 0;
    int drop_err = 0;
    int more_data;

    if (!(first->flags & XEN_NETTXF_more_data))
        return 0;

    do {
        struct xen_netif_tx_request dropped_tx = { 0 };

        if (slots >= work_to_do) {
            netdev_err(queue->vif->dev,
                       "Asked for %d slots but exceeds this limit\n",
                       work_to_do);
            xenvif_fatal_tx_err(queue->vif);
            return -ENODATA;
        }

        /* This guest is really using too many slots and
         * considered malicious.
         */
        if (unlikely(slots >= fatal_skb_slots)) {
            netdev_err(queue->vif->dev,
                       "Malicious frontend using %d slots, threshold %u\n",
                       slots, fatal_skb_slots);
            xenvif_fatal_tx_err(queue->vif);
            return -E2BIG;
        }

        /* Xen network protocol had implicit dependency on
         * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
         * the historical MAX_SKB_FRAGS value 18 to honor the
         * same behavior as before. Any packet using more than
         * 18 slots but less than fatal_skb_slots slots is
         * dropped.
         */
        if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
            if (net_ratelimit())
                netdev_dbg(queue->vif->dev,
                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
            drop_err = -E2BIG;
        }

        if (drop_err)
            txp = &dropped_tx;

        memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
               sizeof(*txp));

        /* If the guest submitted a frame >= 64 KiB then
         * first->size overflowed and following slots will
         * appear to be larger than the frame.
         *
         * This cannot be fatal error as there are buggy
         * frontends that do this.
         *
         * Consume all slots and drop the packet.
         */
        if (!drop_err && txp->size > first->size) {
            if (net_ratelimit())
                netdev_dbg(queue->vif->dev,
                           "Invalid tx request, slot size %u > remaining size %u\n",
                           txp->size, first->size);
            drop_err = -EIO;
        }

        first->size -= txp->size;
        slots++;

        if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
            netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                       txp->offset, txp->size);
            xenvif_fatal_tx_err(queue->vif);
            return -EINVAL;
        }

        more_data = txp->flags & XEN_NETTXF_more_data;

        if (!drop_err)
            txp++;

    } while (more_data);

    if (drop_err) {
        xenvif_tx_err(queue, first, cons + slots);
        return drop_err;
    }

    return slots;
}
struct xenvif_tx_cb {
    u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           struct xen_netif_tx_request *txp,
                                           struct gnttab_map_grant_ref *mop)
{
    queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
    gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
                      GNTMAP_host_map | GNTMAP_readonly,
                      txp->gref, queue->vif->domid);

    memcpy(&queue->pending_tx_info[pending_idx].req, txp,
           sizeof(*txp));
}
*xenvif_alloc_skb(unsigned int size
)
980 struct sk_buff
*skb
=
981 alloc_skb(size
+ NET_SKB_PAD
+ NET_IP_ALIGN
,
982 GFP_ATOMIC
| __GFP_NOWARN
);
983 if (unlikely(skb
== NULL
))
986 /* Packets passed to netif_rx() must have some headroom. */
987 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
);
989 /* Initialize it here to avoid later surprises */
990 skb_shinfo(skb
)->destructor_arg
= NULL
;
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
                                                        struct gnttab_map_grant_ref *gop)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    skb_frag_t *frags = shinfo->frags;
    u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
    int start;
    pending_ring_idx_t index;
    unsigned int nr_slots, frag_overflow = 0;

    /* At this point shinfo->nr_frags is in fact the number of
     * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
     */
    if (shinfo->nr_frags > MAX_SKB_FRAGS) {
        frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
        BUG_ON(frag_overflow > MAX_SKB_FRAGS);
        shinfo->nr_frags = MAX_SKB_FRAGS;
    }
    nr_slots = shinfo->nr_frags;

    /* Skip first skb fragment if it is on same page as header fragment. */
    start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

    for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
         shinfo->nr_frags++, txp++, gop++) {
        index = pending_index(queue->pending_cons++);
        pending_idx = queue->pending_ring[index];
        xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
        frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
    }

    if (frag_overflow) {
        struct sk_buff *nskb = xenvif_alloc_skb(0);
        if (unlikely(nskb == NULL)) {
            if (net_ratelimit())
                netdev_err(queue->vif->dev,
                           "Can't allocate the frag_list skb.\n");
            return NULL;
        }

        shinfo = skb_shinfo(nskb);
        frags = shinfo->frags;

        for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
             shinfo->nr_frags++, txp++, gop++) {
            index = pending_index(queue->pending_cons++);
            pending_idx = queue->pending_ring[index];
            xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
            frag_set_pending_idx(&frags[shinfo->nr_frags],
                                 pending_idx);
        }

        skb_shinfo(skb)->frag_list = nskb;
    }

    return gop;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           grant_handle_t handle)
{
    if (unlikely(queue->grant_tx_handle[pending_idx] !=
                 NETBACK_INVALID_HANDLE)) {
        netdev_err(queue->vif->dev,
                   "Trying to overwrite active handle! pending_idx: %x\n",
                   pending_idx);
        BUG();
    }
    queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
                                             u16 pending_idx)
{
    if (unlikely(queue->grant_tx_handle[pending_idx] ==
                 NETBACK_INVALID_HANDLE)) {
        netdev_err(queue->vif->dev,
                   "Trying to unmap invalid handle! pending_idx: %x\n",
                   pending_idx);
        BUG();
    }
    queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct sk_buff *skb,
                               struct gnttab_map_grant_ref **gopp_map,
                               struct gnttab_copy **gopp_copy)
{
    struct gnttab_map_grant_ref *gop_map = *gopp_map;
    u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
    /* This always points to the shinfo of the skb being checked, which
     * could be either the first or the one on the frag_list
     */
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    /* If this is non-NULL, we are currently checking the frag_list skb, and
     * this points to the shinfo of the first one
     */
    struct skb_shared_info *first_shinfo = NULL;
    int nr_frags = shinfo->nr_frags;
    const bool sharedslot = nr_frags &&
                            frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
    int i, err;

    /* Check status of header. */
    err = (*gopp_copy)->status;
    if (unlikely(err)) {
        if (net_ratelimit())
            netdev_dbg(queue->vif->dev,
                       "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                       (*gopp_copy)->status,
                       pending_idx,
                       (*gopp_copy)->source.u.ref);
        /* The first frag might still have this slot mapped */
        if (!sharedslot)
            xenvif_idx_release(queue, pending_idx,
                               XEN_NETIF_RSP_ERROR);
    }
    (*gopp_copy)++;

check_frags:
    for (i = 0; i < nr_frags; i++, gop_map++) {
        int j, newerr;

        pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

        /* Check error status: if okay then remember grant handle. */
        newerr = gop_map->status;

        if (likely(!newerr)) {
            xenvif_grant_handle_set(queue,
                                    pending_idx,
                                    gop_map->handle);
            /* Had a previous error? Invalidate this fragment. */
            if (unlikely(err)) {
                xenvif_idx_unmap(queue, pending_idx);
                /* If the mapping of the first frag was OK, but
                 * the header's copy failed, and they are
                 * sharing a slot, send an error
                 */
                if (i == 0 && sharedslot)
                    xenvif_idx_release(queue, pending_idx,
                                       XEN_NETIF_RSP_ERROR);
                else
                    xenvif_idx_release(queue, pending_idx,
                                       XEN_NETIF_RSP_OKAY);
            }
            continue;
        }

        /* Error on this fragment: respond to client with an error. */
        if (net_ratelimit())
            netdev_dbg(queue->vif->dev,
                       "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
                       i,
                       gop_map->status,
                       pending_idx,
                       gop_map->ref);
        xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

        /* Not the first error? Preceding frags already invalidated. */
        if (err)
            continue;

        /* First error: if the header hasn't shared a slot with the
         * first frag, release it as well.
         */
        if (!sharedslot)
            xenvif_idx_release(queue,
                               XENVIF_TX_CB(skb)->pending_idx,
                               XEN_NETIF_RSP_OKAY);

        /* Invalidate preceding fragments of this skb. */
        for (j = 0; j < i; j++) {
            pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
            xenvif_idx_unmap(queue, pending_idx);
            xenvif_idx_release(queue, pending_idx,
                               XEN_NETIF_RSP_OKAY);
        }

        /* And if we found the error while checking the frag_list, unmap
         * the first skb's frags
         */
        if (first_shinfo) {
            for (j = 0; j < first_shinfo->nr_frags; j++) {
                pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
                xenvif_idx_unmap(queue, pending_idx);
                xenvif_idx_release(queue, pending_idx,
                                   XEN_NETIF_RSP_OKAY);
            }
        }

        /* Remember the error: invalidate all subsequent fragments. */
        err = newerr;
    }

    if (skb_has_frag_list(skb) && !first_shinfo) {
        first_shinfo = skb_shinfo(skb);
        shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
        nr_frags = shinfo->nr_frags;

        goto check_frags;
    }

    *gopp_map = gop_map;
    return err;
}
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    int nr_frags = shinfo->nr_frags;
    int i;
    u16 prev_pending_idx = INVALID_PENDING_IDX;

    for (i = 0; i < nr_frags; i++) {
        skb_frag_t *frag = shinfo->frags + i;
        struct xen_netif_tx_request *txp;
        struct page *page;
        u16 pending_idx;

        pending_idx = frag_get_pending_idx(frag);

        /* If this is not the first frag, chain it to the previous */
        if (prev_pending_idx == INVALID_PENDING_IDX)
            skb_shinfo(skb)->destructor_arg =
                &callback_param(queue, pending_idx);
        else
            callback_param(queue, prev_pending_idx).ctx =
                &callback_param(queue, pending_idx);

        callback_param(queue, pending_idx).ctx = NULL;
        prev_pending_idx = pending_idx;

        txp = &queue->pending_tx_info[pending_idx].req;
        page = virt_to_page(idx_to_kaddr(queue, pending_idx));
        __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
        skb->len += txp->size;
        skb->data_len += txp->size;
        skb->truesize += txp->size;

        /* Take an extra reference to offset network stack's put_page */
        get_page(queue->mmap_pages[pending_idx]);
    }
    /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
     * overlaps with "index", and "mapping" is not set. I think mapping
     * should be set. If delivered to local stack, it would drop this
     * skb in sk_filter unless the socket has the right to use it.
     */
    skb->pfmemalloc = false;
}
static int xenvif_get_extras(struct xenvif_queue *queue,
                             struct xen_netif_extra_info *extras,
                             int work_to_do)
{
    struct xen_netif_extra_info extra;
    RING_IDX cons = queue->tx.req_cons;

    do {
        if (unlikely(work_to_do-- <= 0)) {
            netdev_err(queue->vif->dev, "Missing extra info\n");
            xenvif_fatal_tx_err(queue->vif);
            return -EBADR;
        }

        memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
               sizeof(extra));
        if (unlikely(!extra.type ||
                     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
            queue->tx.req_cons = ++cons;
            netdev_err(queue->vif->dev,
                       "Invalid extra type: %d\n", extra.type);
            xenvif_fatal_tx_err(queue->vif);
            return -EINVAL;
        }

        memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        queue->tx.req_cons = ++cons;
    } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

    return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
                              struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
    if (!gso->u.gso.size) {
        netdev_err(vif->dev, "GSO size must not be zero.\n");
        xenvif_fatal_tx_err(vif);
        return -EINVAL;
    }

    switch (gso->u.gso.type) {
    case XEN_NETIF_GSO_TYPE_TCPV4:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        break;
    case XEN_NETIF_GSO_TYPE_TCPV6:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        break;
    default:
        netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
        xenvif_fatal_tx_err(vif);
        return -EINVAL;
    }

    skb_shinfo(skb)->gso_size = gso->u.gso.size;
    /* gso_segs will be calculated later */

    return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
    bool recalculate_partial_csum = false;

    /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
     * peers can fail to set NETRXF_csum_blank when sending a GSO
     * frame. In this case force the SKB to CHECKSUM_PARTIAL and
     * recalculate the partial checksum.
     */
    if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
        queue->stats.rx_gso_checksum_fixup++;
        skb->ip_summed = CHECKSUM_PARTIAL;
        recalculate_partial_csum = true;
    }

    /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
    if (skb->ip_summed != CHECKSUM_PARTIAL)
        return 0;

    return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
    u64 now = get_jiffies_64();
    u64 next_credit = queue->credit_window_start +
        msecs_to_jiffies(queue->credit_usec / 1000);

    /* Timer could already be pending in rare cases. */
    if (timer_pending(&queue->credit_timeout))
        return true;

    /* Passed the point where we can replenish credit? */
    if (time_after_eq64(now, next_credit)) {
        queue->credit_window_start = now;
        tx_add_credit(queue);
    }

    /* Still too big to send right now? Set a callback. */
    if (size > queue->remaining_credit) {
        queue->credit_timeout.data =
            (unsigned long)queue;
        queue->credit_timeout.function =
            tx_credit_callback;
        mod_timer(&queue->credit_timeout,
                  next_credit);
        queue->credit_window_start = next_credit;

        return true;
    }

    return false;
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                 int budget,
                                 unsigned *copy_ops,
                                 unsigned *map_ops)
{
    struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
    struct sk_buff *skb;
    int ret;

    while (skb_queue_len(&queue->tx_queue) < budget) {
        struct xen_netif_tx_request txreq;
        struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
        u16 pending_idx;
        RING_IDX idx;
        int work_to_do;
        unsigned int data_len;
        pending_ring_idx_t index;

        if (queue->tx.sring->req_prod - queue->tx.req_cons >
            XEN_NETIF_TX_RING_SIZE) {
            netdev_err(queue->vif->dev,
                       "Impossible number of requests. "
                       "req_prod %d, req_cons %d, size %ld\n",
                       queue->tx.sring->req_prod, queue->tx.req_cons,
                       XEN_NETIF_TX_RING_SIZE);
            xenvif_fatal_tx_err(queue->vif);
            break;
        }

        work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
        if (!work_to_do)
            break;

        idx = queue->tx.req_cons;
        rmb(); /* Ensure that we see the request before we copy it. */
        memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

        /* Credit-based scheduling. */
        if (txreq.size > queue->remaining_credit &&
            tx_credit_exceeded(queue, txreq.size))
            break;

        queue->remaining_credit -= txreq.size;

        work_to_do--;
        queue->tx.req_cons = ++idx;

        memset(extras, 0, sizeof(extras));
        if (txreq.flags & XEN_NETTXF_extra_info) {
            work_to_do = xenvif_get_extras(queue, extras,
                                           work_to_do);
            idx = queue->tx.req_cons;
            if (unlikely(work_to_do < 0))
                break;
        }

        ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
        if (unlikely(ret < 0))
            break;

        idx += ret;

        if (unlikely(txreq.size < ETH_HLEN)) {
            netdev_dbg(queue->vif->dev,
                       "Bad packet size: %d\n", txreq.size);
            xenvif_tx_err(queue, &txreq, idx);
            break;
        }

        /* No crossing a page as the payload mustn't fragment. */
        if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
            netdev_err(queue->vif->dev,
                       "txreq.offset: %x, size: %u, end: %lu\n",
                       txreq.offset, txreq.size,
                       (txreq.offset&~PAGE_MASK) + txreq.size);
            xenvif_fatal_tx_err(queue->vif);
            break;
        }

        index = pending_index(queue->pending_cons);
        pending_idx = queue->pending_ring[index];

        data_len = (txreq.size > PKT_PROT_LEN &&
                    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
            PKT_PROT_LEN : txreq.size;

        skb = xenvif_alloc_skb(data_len);
        if (unlikely(skb == NULL)) {
            netdev_dbg(queue->vif->dev,
                       "Can't allocate a skb in start_xmit.\n");
            xenvif_tx_err(queue, &txreq, idx);
            break;
        }

        if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
            struct xen_netif_extra_info *gso;
            gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

            if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                /* Failure in xenvif_set_skb_gso is fatal. */
                kfree_skb(skb);
                break;
            }
        }

        XENVIF_TX_CB(skb)->pending_idx = pending_idx;

        __skb_put(skb, data_len);
        queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
        queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
        queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

        queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
            virt_to_mfn(skb->data);
        queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
        queue->tx_copy_ops[*copy_ops].dest.offset =
            offset_in_page(skb->data);

        queue->tx_copy_ops[*copy_ops].len = data_len;
        queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

        (*copy_ops)++;

        skb_shinfo(skb)->nr_frags = ret;
        if (data_len < txreq.size) {
            skb_shinfo(skb)->nr_frags++;
            frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                 pending_idx);
            xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
            gop++;
        } else {
            frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                 INVALID_PENDING_IDX);
            memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
                   sizeof(txreq));
        }

        queue->pending_cons++;

        request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
        if (request_gop == NULL) {
            kfree_skb(skb);
            xenvif_tx_err(queue, &txreq, idx);
            break;
        }
        gop = request_gop;

        __skb_queue_tail(&queue->tx_queue, skb);

        queue->tx.req_cons = idx;

        if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
            (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
            break;
    }

    (*map_ops) = gop - queue->tx_map_ops;
    return;
}
/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
    unsigned int offset = skb_headlen(skb);
    skb_frag_t frags[MAX_SKB_FRAGS];
    int i;
    struct ubuf_info *uarg;
    struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

    queue->stats.tx_zerocopy_sent += 2;
    queue->stats.tx_frag_overflow++;

    xenvif_fill_frags(queue, nskb);
    /* Subtract frags size, we will correct it later */
    skb->truesize -= skb->data_len;
    skb->len += nskb->len;
    skb->data_len += nskb->len;

    /* create a brand new frags array and coalesce there */
    for (i = 0; offset < skb->len; i++) {
        struct page *page;
        unsigned int len;

        BUG_ON(i >= MAX_SKB_FRAGS);
        page = alloc_page(GFP_ATOMIC|__GFP_COLD);
        if (!page) {
            int j;
            skb->truesize += skb->data_len;
            for (j = 0; j < i; j++)
                put_page(frags[j].page.p);
            return -ENOMEM;
        }

        if (offset + PAGE_SIZE < skb->len)
            len = PAGE_SIZE;
        else
            len = skb->len - offset;
        if (skb_copy_bits(skb, offset, page_address(page), len))
            BUG();

        offset += len;
        frags[i].page.p = page;
        frags[i].page_offset = 0;
        skb_frag_size_set(&frags[i], len);
    }
    /* swap out with old one */
    memcpy(skb_shinfo(skb)->frags,
           frags,
           i * sizeof(skb_frag_t));
    skb_shinfo(skb)->nr_frags = i;
    skb->truesize += i * PAGE_SIZE;

    /* remove traces of mapped pages and frag_list */
    skb_frag_list_init(skb);
    uarg = skb_shinfo(skb)->destructor_arg;
    /* increase inflight counter to offset decrement in callback */
    atomic_inc(&queue->inflight_packets);
    uarg->callback(uarg, true);
    skb_shinfo(skb)->destructor_arg = NULL;

    xenvif_skb_zerocopy_prepare(queue, nskb);
    kfree_skb(nskb);

    return 0;
}
*queue
)
1597 struct gnttab_map_grant_ref
*gop_map
= queue
->tx_map_ops
;
1598 struct gnttab_copy
*gop_copy
= queue
->tx_copy_ops
;
1599 struct sk_buff
*skb
;
1602 while ((skb
= __skb_dequeue(&queue
->tx_queue
)) != NULL
) {
1603 struct xen_netif_tx_request
*txp
;
1607 pending_idx
= XENVIF_TX_CB(skb
)->pending_idx
;
1608 txp
= &queue
->pending_tx_info
[pending_idx
].req
;
1610 /* Check the remap error code. */
1611 if (unlikely(xenvif_tx_check_gop(queue
, skb
, &gop_map
, &gop_copy
))) {
1612 /* If there was an error, xenvif_tx_check_gop is
1613 * expected to release all the frags which were mapped,
1614 * so kfree_skb shouldn't do it again
1616 skb_shinfo(skb
)->nr_frags
= 0;
1617 if (skb_has_frag_list(skb
)) {
1618 struct sk_buff
*nskb
=
1619 skb_shinfo(skb
)->frag_list
;
1620 skb_shinfo(nskb
)->nr_frags
= 0;
1626 data_len
= skb
->len
;
1627 callback_param(queue
, pending_idx
).ctx
= NULL
;
1628 if (data_len
< txp
->size
) {
1629 /* Append the packet payload as a fragment. */
1630 txp
->offset
+= data_len
;
1631 txp
->size
-= data_len
;
1633 /* Schedule a response immediately. */
1634 xenvif_idx_release(queue
, pending_idx
,
1635 XEN_NETIF_RSP_OKAY
);
1638 if (txp
->flags
& XEN_NETTXF_csum_blank
)
1639 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1640 else if (txp
->flags
& XEN_NETTXF_data_validated
)
1641 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1643 xenvif_fill_frags(queue
, skb
);
1645 if (unlikely(skb_has_frag_list(skb
))) {
1646 if (xenvif_handle_frag_list(queue
, skb
)) {
1647 if (net_ratelimit())
1648 netdev_err(queue
->vif
->dev
,
1649 "Not enough memory to consolidate frag_list!\n");
1650 xenvif_skb_zerocopy_prepare(queue
, skb
);
1656 if (skb_is_nonlinear(skb
) && skb_headlen(skb
) < PKT_PROT_LEN
) {
1657 int target
= min_t(int, skb
->len
, PKT_PROT_LEN
);
1658 __pskb_pull_tail(skb
, target
- skb_headlen(skb
));
1661 skb
->dev
= queue
->vif
->dev
;
1662 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1663 skb_reset_network_header(skb
);
1665 if (checksum_setup(queue
, skb
)) {
1666 netdev_dbg(queue
->vif
->dev
,
1667 "Can't setup checksum in net_tx_action\n");
1668 /* We have to set this flag to trigger the callback */
1669 if (skb_shinfo(skb
)->destructor_arg
)
1670 xenvif_skb_zerocopy_prepare(queue
, skb
);
1675 skb_probe_transport_header(skb
, 0);
1677 /* If the packet is GSO then we will have just set up the
1678 * transport header offset in checksum_setup so it's now
1679 * straightforward to calculate gso_segs.
1681 if (skb_is_gso(skb
)) {
1682 int mss
= skb_shinfo(skb
)->gso_size
;
1683 int hdrlen
= skb_transport_header(skb
) -
1684 skb_mac_header(skb
) +
1687 skb_shinfo(skb
)->gso_segs
=
1688 DIV_ROUND_UP(skb
->len
- hdrlen
, mss
);
1691 queue
->stats
.rx_bytes
+= skb
->len
;
1692 queue
->stats
.rx_packets
++;
1696 /* Set this flag right before netif_receive_skb, otherwise
1697 * someone might think this packet already left netback, and
1698 * do a skb_copy_ubufs while we are still in control of the
1699 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1701 if (skb_shinfo(skb
)->destructor_arg
) {
1702 xenvif_skb_zerocopy_prepare(queue
, skb
);
1703 queue
->stats
.tx_zerocopy_sent
++;
1706 netif_receive_skb(skb
);
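/*
 * Illustrative worked example (not part of the original source) for the
 * gso_segs calculation in xenvif_tx_submit() above: for a TCPv4 GSO packet
 * with skb->len == 65226, Ethernet + IP + TCP headers of 14 + 20 + 20 == 54
 * bytes and gso_size (MSS) == 1448,
 *
 *   gso_segs = DIV_ROUND_UP(65226 - 54, 1448)
 *            = DIV_ROUND_UP(65172, 1448) = 46
 *
 * i.e. 45 full-MSS segments plus one short trailing segment.
 */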
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
    unsigned long flags;
    pending_ring_idx_t index;
    struct xenvif_queue *queue = ubuf_to_queue(ubuf);

    /* This is the only place where we grab this lock, to protect callbacks
     * from each other.
     */
    spin_lock_irqsave(&queue->callback_lock, flags);
    do {
        u16 pending_idx = ubuf->desc;
        ubuf = (struct ubuf_info *) ubuf->ctx;
        BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
               MAX_PENDING_REQS);
        index = pending_index(queue->dealloc_prod);
        queue->dealloc_ring[index] = pending_idx;
        /* Sync with xenvif_tx_dealloc_action:
         * insert idx then incr producer.
         */
        smp_wmb();
        queue->dealloc_prod++;
    } while (ubuf);
    wake_up(&queue->dealloc_wq);
    spin_unlock_irqrestore(&queue->callback_lock, flags);

    if (likely(zerocopy_success))
        queue->stats.tx_zerocopy_success++;
    else
        queue->stats.tx_zerocopy_fail++;
    xenvif_skb_zerocopy_complete(queue);
}
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
    struct gnttab_unmap_grant_ref *gop;
    pending_ring_idx_t dc, dp;
    u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
    unsigned int i = 0;

    dc = queue->dealloc_cons;
    gop = queue->tx_unmap_ops;

    /* Free up any grants we have finished using */
    do {
        dp = queue->dealloc_prod;

        /* Ensure we see all indices enqueued by all
         * xenvif_zerocopy_callback().
         */
        smp_rmb();

        while (dc != dp) {
            BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
            pending_idx =
                queue->dealloc_ring[pending_index(dc++)];

            pending_idx_release[gop-queue->tx_unmap_ops] =
                pending_idx;
            queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
                queue->mmap_pages[pending_idx];
            gnttab_set_unmap_op(gop,
                                idx_to_kaddr(queue, pending_idx),
                                GNTMAP_host_map,
                                queue->grant_tx_handle[pending_idx]);
            xenvif_grant_handle_reset(queue, pending_idx);
            ++gop;
        }

    } while (dp != queue->dealloc_prod);

    queue->dealloc_cons = dc;

    if (gop - queue->tx_unmap_ops > 0) {
        int ret;
        ret = gnttab_unmap_refs(queue->tx_unmap_ops,
                                NULL,
                                queue->pages_to_unmap,
                                gop - queue->tx_unmap_ops);
        if (ret) {
            netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
                       gop - queue->tx_unmap_ops, ret);
            for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
                if (gop[i].status != GNTST_okay)
                    netdev_err(queue->vif->dev,
                               " host_addr: %llx handle: %x status: %d\n",
                               gop[i].host_addr,
                               gop[i].handle,
                               gop[i].status);
            }
            BUG();
        }
    }

    for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
        xenvif_idx_release(queue, pending_idx_release[i],
                           XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
    unsigned nr_mops, nr_cops = 0;
    int work_done, ret;

    if (unlikely(!tx_work_todo(queue)))
        return 0;

    xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

    if (nr_cops == 0)
        return 0;

    gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
    if (nr_mops != 0) {
        ret = gnttab_map_refs(queue->tx_map_ops,
                              NULL,
                              queue->pages_to_map,
                              nr_mops);
        BUG_ON(ret);
    }

    work_done = xenvif_tx_submit(queue);

    return work_done;
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status)
{
    struct pending_tx_info *pending_tx_info;
    pending_ring_idx_t index;
    unsigned long flags;

    pending_tx_info = &queue->pending_tx_info[pending_idx];
    spin_lock_irqsave(&queue->response_lock, flags);
    make_tx_response(queue, &pending_tx_info->req, status);
    index = pending_index(queue->pending_prod);
    queue->pending_ring[index] = pending_idx;
    /* TX shouldn't use the index before we give it back here */
    mb();
    queue->pending_prod++;
    spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             s8 st)
{
    RING_IDX i = queue->tx.rsp_prod_pvt;
    struct xen_netif_tx_response *resp;
    int notify;

    resp = RING_GET_RESPONSE(&queue->tx, i);
    resp->id     = txp->id;
    resp->status = st;

    if (txp->flags & XEN_NETTXF_extra_info)
        RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

    queue->tx.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
    if (notify)
        notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                                      u16 id,
                                                      s8 st,
                                                      u16 offset,
                                                      u16 size,
                                                      u16 flags)
{
    RING_IDX i = queue->rx.rsp_prod_pvt;
    struct xen_netif_rx_response *resp;

    resp = RING_GET_RESPONSE(&queue->rx, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = id;
    resp->status = (s16)size;
    if (st < 0)
        resp->status = (s16)st;

    queue->rx.rsp_prod_pvt = ++i;

    return resp;
}
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
    int ret;
    struct gnttab_unmap_grant_ref tx_unmap_op;

    gnttab_set_unmap_op(&tx_unmap_op,
                        idx_to_kaddr(queue, pending_idx),
                        GNTMAP_host_map,
                        queue->grant_tx_handle[pending_idx]);
    xenvif_grant_handle_reset(queue, pending_idx);

    ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
                            &queue->mmap_pages[pending_idx], 1);
    if (ret) {
        netdev_err(queue->vif->dev,
                   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
                   ret,
                   pending_idx,
                   tx_unmap_op.host_addr,
                   tx_unmap_op.handle,
                   tx_unmap_op.status);
        BUG();
    }
}
static inline int tx_work_todo(struct xenvif_queue *queue)
{
    if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
        return 1;

    return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
    return queue->dealloc_cons != queue->dealloc_prod;
}
void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
    if (queue->tx.sring)
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                queue->tx.sring);
    if (queue->rx.sring)
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                queue->rx.sring);
}
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
                              grant_ref_t tx_ring_ref,
                              grant_ref_t rx_ring_ref)
{
    void *addr;
    struct xen_netif_tx_sring *txs;
    struct xen_netif_rx_sring *rxs;

    int err = -ENOMEM;

    err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                 tx_ring_ref, &addr);
    if (err)
        goto err;

    txs = (struct xen_netif_tx_sring *)addr;
    BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

    err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                 rx_ring_ref, &addr);
    if (err)
        goto err;

    rxs = (struct xen_netif_rx_sring *)addr;
    BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

    return 0;

err:
    xenvif_unmap_frontend_rings(queue);
    return err;
}
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
    struct xenvif *vif = queue->vif;

    queue->stalled = true;

    /* At least one queue has stalled? Disable the carrier. */
    spin_lock(&vif->lock);
    if (vif->stalled_queues++ == 0) {
        netdev_info(vif->dev, "Guest Rx stalled");
        netif_carrier_off(vif->dev);
    }
    spin_unlock(&vif->lock);
}
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
    struct xenvif *vif = queue->vif;

    queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
    queue->stalled = false;

    /* All queues are ready? Enable the carrier. */
    spin_lock(&vif->lock);
    if (--vif->stalled_queues == 0) {
        netdev_info(vif->dev, "Guest Rx ready");
        netif_carrier_on(vif->dev);
    }
    spin_unlock(&vif->lock);
}
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
    RING_IDX prod, cons;

    prod = queue->rx.sring->req_prod;
    cons = queue->rx.req_cons;

    return !queue->stalled
        && prod - cons < XEN_NETBK_RX_SLOTS_MAX
        && time_after(jiffies,
                      queue->last_rx_time + rx_stall_timeout_jiffies);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
    RING_IDX prod, cons;

    prod = queue->rx.sring->req_prod;
    cons = queue->rx.req_cons;

    return queue->stalled
        && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
    return (!skb_queue_empty(&queue->rx_queue)
            && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
        || xenvif_rx_queue_stalled(queue)
        || xenvif_rx_queue_ready(queue)
        || kthread_should_stop()
        || queue->vif->disabled;
}
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
    struct sk_buff *skb;
    long timeout;

    skb = skb_peek(&queue->rx_queue);
    if (!skb)
        return MAX_SCHEDULE_TIMEOUT;

    timeout = XENVIF_RX_CB(skb)->expires - jiffies;
    return timeout < 0 ? 0 : timeout;
}
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
    DEFINE_WAIT(wait);

    if (xenvif_have_rx_work(queue))
        return;

    for (;;) {
        long ret;

        prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
        if (xenvif_have_rx_work(queue))
            break;
        ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
        if (!ret)
            break;
    }
    finish_wait(&queue->wq, &wait);
}
int xenvif_kthread_guest_rx(void *data)
{
    struct xenvif_queue *queue = data;
    struct xenvif *vif = queue->vif;

    for (;;) {
        xenvif_wait_for_rx_work(queue);

        if (kthread_should_stop())
            break;

        /* This frontend is found to be rogue, disable it in
         * kthread context. Currently this is only set when
         * netback finds out the frontend sent a malformed packet,
         * but we cannot disable the interface in softirq
         * context so we defer it here, if this thread is
         * associated with queue 0.
         */
        if (unlikely(vif->disabled && queue->id == 0)) {
            xenvif_carrier_off(vif);
            xenvif_rx_queue_purge(queue);
            continue;
        }

        if (!skb_queue_empty(&queue->rx_queue))
            xenvif_rx_action(queue);

        /* If the guest hasn't provided any Rx slots for a
         * while it's probably not responsive, drop the
         * carrier so packets are dropped earlier.
         */
        if (xenvif_rx_queue_stalled(queue))
            xenvif_queue_carrier_off(queue);
        else if (xenvif_rx_queue_ready(queue))
            xenvif_queue_carrier_on(queue);

        /* Queued packets may have foreign pages from other
         * domains. These cannot be queued indefinitely as
         * this would starve guests of grant refs and transmit
         * slots.
         */
        xenvif_rx_queue_drop_expired(queue);

        xenvif_rx_queue_maybe_wake(queue);

        cond_resched();
    }

    /* Bin any remaining skbs */
    xenvif_rx_queue_purge(queue);

    return 0;
}
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
    /* Dealloc thread must remain running until all inflight
     * packets complete.
     */
    return kthread_should_stop() &&
        !atomic_read(&queue->inflight_packets);
}
int xenvif_dealloc_kthread(void *data)
{
    struct xenvif_queue *queue = data;

    for (;;) {
        wait_event_interruptible(queue->dealloc_wq,
                                 tx_dealloc_work_todo(queue) ||
                                 xenvif_dealloc_kthread_should_stop(queue));
        if (xenvif_dealloc_kthread_should_stop(queue))
            break;

        xenvif_tx_dealloc_action(queue);
        cond_resched();
    }

    /* Unmap anything remaining */
    if (tx_dealloc_work_todo(queue))
        xenvif_tx_dealloc_action(queue);

    return 0;
}
static int __init netback_init(void)
{
    int rc = 0;

    if (!xen_domain())
        return -ENODEV;

    /* Allow as many queues as there are CPUs, by default */
    xenvif_max_queues = num_online_cpus();

    if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
        pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
                fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
        fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
    }

    rc = xenvif_xenbus_init();
    if (rc)
        goto failed_init;

    rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
    rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);

#ifdef CONFIG_DEBUG_FS
    xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
    if (IS_ERR_OR_NULL(xen_netback_dbg_root))
        pr_warn("Init of debugfs returned %ld!\n",
                PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

    return 0;

failed_init:
    return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
    if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
        debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
    xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");