/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

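/* Check whether the frontend has posted enough rx ring requests for the
 * skb at the head of the queue: one slot per XEN_PAGE_SIZE of payload,
 * plus one extra slot each for a GSO extra and a hash extra.  If not,
 * request an event at req_event and re-check before giving up.
 */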
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return false;

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))
		needed++;
	if (skb->sw_hash)
		needed++;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

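/* Queue an skb for transmission to the frontend.  If the queued bytes
 * exceed rx_queue_max, stop the corresponding netdev tx queue to apply
 * backpressure.
 */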
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

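/* Remove the skb at the head of rx_queue, waking the netdev tx queue
 * again once the queued bytes drop back below rx_queue_max.
 */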
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

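/* Drop every skb still queued for the frontend. */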
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

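/* Drop skbs from the head of the queue whose expiry time has passed. */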
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}

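/* Issue the batched grant copies, patch the response status of any copy
 * that failed, push the responses to the frontend (notifying it if
 * necessary) and free the skbs completed in this batch.
 */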
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

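/* Add one grant copy operation for a chunk of skb data, flushing the
 * batch first if it is already full.  Data in foreign pages is copied
 * grant-to-grant; local data is copied by frame number.
 */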
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

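/* Map the skb's GSO type onto the corresponding netif GSO type. */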
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

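/* Per-packet state while an skb is being turned into ring slots. */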
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	int frag; /* frag == -1 => skb->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

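/* Dequeue the next skb, reset the packet state and, where the vif
 * supports it, prepare GSO and hash extra info segments for the packet.
 */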
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

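/* The packet's responses are complete: advance rsp_prod_pvt and hold the
 * skb until the copy batch that carries its data has been flushed.
 */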
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

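/* Return the next chunk of the packet's data, limited both by the space
 * left in the current ring slot (a Xen page) and by the page boundary of
 * the source fragment.
 */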
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *skb = pkt->skb;
	void *frag_data;
	size_t frag_len, chunk_len;

	if (pkt->frag == -1) {
		frag_data = skb->data;
		frag_len = skb_headlen(skb);
	} else {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min(chunk_len,
			XEN_PAGE_SIZE - xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len) {
		pkt->frag++;
		pkt->frag_offset = 0;
	}

	*data = frag_data;
	*len = chunk_len;
}

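/* Fill one data slot: queue grant copies until the slot (one Xen page)
 * is full or the packet is exhausted, then build the response, setting
 * checksum and extra-info flags on the first slot and more_data if
 * further slots follow.
 */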
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = 0;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

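/* Consume one ring slot for a pending extra info segment, chaining
 * further extras with XEN_NETIF_EXTRA_FLAG_MORE.
 */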
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

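/* Transmit one skb to the frontend: the first data slot, then any extra
 * info slots, then the remaining data slots.
 */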
void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

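/* Transmit queued skbs to the frontend, at most RX_BATCH_SIZE per call,
 * then flush the final copy batch and free the completed skbs.
 */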
#define RX_BATCH_SIZE 64

void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

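/* Stall detection: a queue is stalled when the frontend has provided no
 * rx requests for longer than the stall timeout, and ready again once at
 * least one request is available.
 */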
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled &&
		prod - cons < 1 &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

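/* Return true if the rx kthread has something to do. */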
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		kthread_should_stop() ||
		queue->vif->disabled;
}

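/* Time remaining (in jiffies) until the skb at the head of the queue
 * expires; MAX_SCHEDULE_TIMEOUT if the queue is empty.
 */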
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

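/* The carrier is dropped while any queue is stalled and re-raised once
 * every stalled queue has become ready again.
 */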
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

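/* Per-queue kernel thread that transmits queued skbs to the frontend,
 * maintains carrier state based on stall detection and drops packets
 * that the frontend has left unconsumed for too long.
 */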
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}