/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>
36 | static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) | |
37 | { | |
38 | RING_IDX prod, cons; | |
39 | struct sk_buff *skb; | |
40 | int needed; | |
41 | ||
42 | skb = skb_peek(&queue->rx_queue); | |
43 | if (!skb) | |
44 | return false; | |
45 | ||
46 | needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); | |
47 | if (skb_is_gso(skb)) | |
48 | needed++; | |
49 | if (skb->sw_hash) | |
50 | needed++; | |
51 | ||
52 | do { | |
53 | prod = queue->rx.sring->req_prod; | |
54 | cons = queue->rx.req_cons; | |
55 | ||
56 | if (prod - cons >= needed) | |
57 | return true; | |
58 | ||
59 | queue->rx.sring->req_event = prod + 1; | |
60 | ||
61 | /* Make sure event is visible before we check prod | |
62 | * again. | |
63 | */ | |
64 | mb(); | |
65 | } while (queue->rx.sring->req_prod != prod); | |
66 | ||
67 | return false; | |
68 | } | |
69 | ||
70 | void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) | |
71 | { | |
72 | unsigned long flags; | |
73 | ||
74 | spin_lock_irqsave(&queue->rx_queue.lock, flags); | |
75 | ||
76 | __skb_queue_tail(&queue->rx_queue, skb); | |
77 | ||
78 | queue->rx_queue_len += skb->len; | |
79 | if (queue->rx_queue_len > queue->rx_queue_max) { | |
80 | struct net_device *dev = queue->vif->dev; | |
81 | ||
82 | netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); | |
83 | } | |
84 | ||
85 | spin_unlock_irqrestore(&queue->rx_queue.lock, flags); | |
86 | } | |
87 | ||
88 | static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) | |
89 | { | |
90 | struct sk_buff *skb; | |
91 | ||
92 | spin_lock_irq(&queue->rx_queue.lock); | |
93 | ||
94 | skb = __skb_dequeue(&queue->rx_queue); | |
7c0b1a23 | 95 | if (skb) { |
3254f836 | 96 | queue->rx_queue_len -= skb->len; |
7c0b1a23 DV |
97 | if (queue->rx_queue_len < queue->rx_queue_max) { |
98 | struct netdev_queue *txq; | |
3254f836 | 99 | |
7c0b1a23 DV |
100 | txq = netdev_get_tx_queue(queue->vif->dev, queue->id); |
101 | netif_tx_wake_queue(txq); | |
102 | } | |
3254f836 PD |
103 | } |
104 | ||
105 | spin_unlock_irq(&queue->rx_queue.lock); | |
7c0b1a23 DV |
106 | |
107 | return skb; | |
3254f836 PD |
108 | } |
109 | ||
/* Drop every skb still sitting in the internal Rx queue. */
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	for (;;) {
		struct sk_buff *skb = xenvif_rx_dequeue(queue);

		if (!skb)
			break;
		kfree_skb(skb);
	}
}
117 | ||
118 | static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) | |
119 | { | |
120 | struct sk_buff *skb; | |
121 | ||
122 | for (;;) { | |
123 | skb = skb_peek(&queue->rx_queue); | |
124 | if (!skb) | |
125 | break; | |
126 | if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) | |
127 | break; | |
128 | xenvif_rx_dequeue(queue); | |
129 | kfree_skb(skb); | |
130 | } | |
131 | } | |
132 | ||
eb1723a2 | 133 | static void xenvif_rx_copy_flush(struct xenvif_queue *queue) |
3254f836 | 134 | { |
eb1723a2 | 135 | unsigned int i; |
3254f836 | 136 | |
eb1723a2 | 137 | gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num); |
3254f836 | 138 | |
eb1723a2 DV |
139 | for (i = 0; i < queue->rx_copy.num; i++) { |
140 | struct gnttab_copy *op; | |
3254f836 | 141 | |
eb1723a2 | 142 | op = &queue->rx_copy.op[i]; |
3254f836 | 143 | |
eb1723a2 DV |
144 | /* If the copy failed, overwrite the status field in |
145 | * the corresponding response. | |
146 | */ | |
147 | if (unlikely(op->status != GNTST_okay)) { | |
148 | struct xen_netif_rx_response *rsp; | |
3254f836 | 149 | |
eb1723a2 DV |
150 | rsp = RING_GET_RESPONSE(&queue->rx, |
151 | queue->rx_copy.idx[i]); | |
152 | rsp->status = op->status; | |
153 | } | |
154 | } | |
3254f836 | 155 | |
eb1723a2 DV |
156 | queue->rx_copy.num = 0; |
157 | } | |
3254f836 | 158 | |
eb1723a2 DV |
159 | static void xenvif_rx_copy_add(struct xenvif_queue *queue, |
160 | struct xen_netif_rx_request *req, | |
161 | unsigned int offset, void *data, size_t len) | |
3254f836 | 162 | { |
eb1723a2 DV |
163 | struct gnttab_copy *op; |
164 | struct page *page; | |
3254f836 | 165 | struct xen_page_foreign *foreign; |
3254f836 | 166 | |
eb1723a2 DV |
167 | if (queue->rx_copy.num == COPY_BATCH_SIZE) |
168 | xenvif_rx_copy_flush(queue); | |
3254f836 | 169 | |
eb1723a2 | 170 | op = &queue->rx_copy.op[queue->rx_copy.num]; |
3254f836 | 171 | |
eb1723a2 | 172 | page = virt_to_page(data); |
3254f836 | 173 | |
eb1723a2 | 174 | op->flags = GNTCOPY_dest_gref; |
3254f836 PD |
175 | |
176 | foreign = xen_page_foreign(page); | |
177 | if (foreign) { | |
eb1723a2 DV |
178 | op->source.domid = foreign->domid; |
179 | op->source.u.ref = foreign->gref; | |
180 | op->flags |= GNTCOPY_source_gref; | |
3254f836 | 181 | } else { |
eb1723a2 DV |
182 | op->source.u.gmfn = virt_to_gfn(data); |
183 | op->source.domid = DOMID_SELF; | |
3254f836 | 184 | } |
3254f836 | 185 | |
eb1723a2 DV |
186 | op->source.offset = xen_offset_in_page(data); |
187 | op->dest.u.ref = req->gref; | |
188 | op->dest.domid = queue->vif->domid; | |
189 | op->dest.offset = offset; | |
190 | op->len = len; | |
3254f836 | 191 | |
eb1723a2 DV |
192 | queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons; |
193 | queue->rx_copy.num++; | |
3254f836 PD |
194 | } |
195 | ||
eb1723a2 | 196 | static unsigned int xenvif_gso_type(struct sk_buff *skb) |
3254f836 | 197 | { |
3254f836 PD |
198 | if (skb_is_gso(skb)) { |
199 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | |
eb1723a2 DV |
200 | return XEN_NETIF_GSO_TYPE_TCPV4; |
201 | else | |
202 | return XEN_NETIF_GSO_TYPE_TCPV6; | |
3254f836 | 203 | } |
eb1723a2 DV |
204 | return XEN_NETIF_GSO_TYPE_NONE; |
205 | } | |
3254f836 | 206 | |
eb1723a2 DV |
207 | struct xenvif_pkt_state { |
208 | struct sk_buff *skb; | |
209 | size_t remaining_len; | |
210 | int frag; /* frag == -1 => skb->head */ | |
211 | unsigned int frag_offset; | |
212 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; | |
213 | unsigned int extra_count; | |
214 | unsigned int slot; | |
215 | }; | |
3254f836 | 216 | |
eb1723a2 DV |
217 | static void xenvif_rx_next_skb(struct xenvif_queue *queue, |
218 | struct xenvif_pkt_state *pkt) | |
219 | { | |
220 | struct sk_buff *skb; | |
221 | unsigned int gso_type; | |
3254f836 | 222 | |
eb1723a2 | 223 | skb = xenvif_rx_dequeue(queue); |
3254f836 | 224 | |
eb1723a2 DV |
225 | queue->stats.tx_bytes += skb->len; |
226 | queue->stats.tx_packets++; | |
3254f836 | 227 | |
eb1723a2 DV |
228 | /* Reset packet state. */ |
229 | memset(pkt, 0, sizeof(struct xenvif_pkt_state)); | |
3254f836 | 230 | |
eb1723a2 DV |
231 | pkt->skb = skb; |
232 | pkt->remaining_len = skb->len; | |
233 | pkt->frag = -1; | |
3254f836 | 234 | |
eb1723a2 DV |
235 | gso_type = xenvif_gso_type(skb); |
236 | if ((1 << gso_type) & queue->vif->gso_mask) { | |
237 | struct xen_netif_extra_info *extra; | |
3254f836 | 238 | |
eb1723a2 | 239 | extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; |
3254f836 | 240 | |
eb1723a2 DV |
241 | extra->u.gso.type = gso_type; |
242 | extra->u.gso.size = skb_shinfo(skb)->gso_size; | |
243 | extra->u.gso.pad = 0; | |
244 | extra->u.gso.features = 0; | |
245 | extra->type = XEN_NETIF_EXTRA_TYPE_GSO; | |
246 | extra->flags = 0; | |
3254f836 | 247 | |
eb1723a2 | 248 | pkt->extra_count++; |
3254f836 PD |
249 | } |
250 | ||
eb1723a2 DV |
251 | if (skb->sw_hash) { |
252 | struct xen_netif_extra_info *extra; | |
3254f836 | 253 | |
eb1723a2 | 254 | extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; |
3254f836 | 255 | |
eb1723a2 DV |
256 | extra->u.hash.algorithm = |
257 | XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ; | |
3254f836 | 258 | |
eb1723a2 DV |
259 | if (skb->l4_hash) |
260 | extra->u.hash.type = | |
261 | skb->protocol == htons(ETH_P_IP) ? | |
262 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP : | |
263 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; | |
264 | else | |
265 | extra->u.hash.type = | |
266 | skb->protocol == htons(ETH_P_IP) ? | |
267 | _XEN_NETIF_CTRL_HASH_TYPE_IPV4 : | |
268 | _XEN_NETIF_CTRL_HASH_TYPE_IPV6; | |
3254f836 | 269 | |
eb1723a2 | 270 | *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb); |
3254f836 | 271 | |
eb1723a2 DV |
272 | extra->type = XEN_NETIF_EXTRA_TYPE_HASH; |
273 | extra->flags = 0; | |
3254f836 | 274 | |
eb1723a2 | 275 | pkt->extra_count++; |
3254f836 | 276 | } |
3254f836 PD |
277 | } |
278 | ||
eb1723a2 DV |
279 | static void xenvif_rx_complete(struct xenvif_queue *queue, |
280 | struct xenvif_pkt_state *pkt) | |
3254f836 | 281 | { |
eb1723a2 | 282 | int notify; |
3254f836 | 283 | |
eb1723a2 DV |
284 | /* Complete any outstanding copy ops for this skb. */ |
285 | xenvif_rx_copy_flush(queue); | |
3254f836 | 286 | |
eb1723a2 DV |
287 | /* Push responses and notify. */ |
288 | queue->rx.rsp_prod_pvt = queue->rx.req_cons; | |
289 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify); | |
290 | if (notify) | |
291 | notify_remote_via_irq(queue->rx_irq); | |
3254f836 | 292 | |
eb1723a2 | 293 | dev_kfree_skb(pkt->skb); |
3254f836 PD |
294 | } |
295 | ||
eb1723a2 DV |
296 | static void xenvif_rx_next_chunk(struct xenvif_queue *queue, |
297 | struct xenvif_pkt_state *pkt, | |
298 | unsigned int offset, void **data, | |
299 | size_t *len) | |
3254f836 | 300 | { |
eb1723a2 DV |
301 | struct sk_buff *skb = pkt->skb; |
302 | void *frag_data; | |
303 | size_t frag_len, chunk_len; | |
3254f836 | 304 | |
eb1723a2 DV |
305 | if (pkt->frag == -1) { |
306 | frag_data = skb->data; | |
307 | frag_len = skb_headlen(skb); | |
308 | } else { | |
309 | skb_frag_t *frag = &skb_shinfo(skb)->frags[pkt->frag]; | |
3254f836 | 310 | |
eb1723a2 DV |
311 | frag_data = skb_frag_address(frag); |
312 | frag_len = skb_frag_size(frag); | |
3254f836 | 313 | } |
3254f836 | 314 | |
eb1723a2 DV |
315 | frag_data += pkt->frag_offset; |
316 | frag_len -= pkt->frag_offset; | |
3254f836 | 317 | |
eb1723a2 DV |
318 | chunk_len = min(frag_len, XEN_PAGE_SIZE - offset); |
319 | chunk_len = min(chunk_len, | |
320 | XEN_PAGE_SIZE - xen_offset_in_page(frag_data)); | |
3254f836 | 321 | |
eb1723a2 | 322 | pkt->frag_offset += chunk_len; |
3254f836 | 323 | |
eb1723a2 DV |
324 | /* Advance to next frag? */ |
325 | if (frag_len == chunk_len) { | |
326 | pkt->frag++; | |
327 | pkt->frag_offset = 0; | |
3254f836 PD |
328 | } |
329 | ||
eb1723a2 DV |
330 | *data = frag_data; |
331 | *len = chunk_len; | |
332 | } | |
3254f836 | 333 | |
eb1723a2 DV |
334 | static void xenvif_rx_data_slot(struct xenvif_queue *queue, |
335 | struct xenvif_pkt_state *pkt, | |
336 | struct xen_netif_rx_request *req, | |
337 | struct xen_netif_rx_response *rsp) | |
338 | { | |
339 | unsigned int offset = 0; | |
340 | unsigned int flags; | |
3254f836 | 341 | |
eb1723a2 DV |
342 | do { |
343 | size_t len; | |
344 | void *data; | |
3254f836 | 345 | |
eb1723a2 DV |
346 | xenvif_rx_next_chunk(queue, pkt, offset, &data, &len); |
347 | xenvif_rx_copy_add(queue, req, offset, data, len); | |
3254f836 | 348 | |
eb1723a2 DV |
349 | offset += len; |
350 | pkt->remaining_len -= len; | |
3254f836 | 351 | |
eb1723a2 | 352 | } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0); |
3254f836 | 353 | |
eb1723a2 DV |
354 | if (pkt->remaining_len > 0) |
355 | flags = XEN_NETRXF_more_data; | |
356 | else | |
357 | flags = 0; | |
358 | ||
359 | if (pkt->slot == 0) { | |
360 | struct sk_buff *skb = pkt->skb; | |
3254f836 | 361 | |
eb1723a2 | 362 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
3254f836 PD |
363 | flags |= XEN_NETRXF_csum_blank | |
364 | XEN_NETRXF_data_validated; | |
365 | else if (skb->ip_summed == CHECKSUM_UNNECESSARY) | |
3254f836 PD |
366 | flags |= XEN_NETRXF_data_validated; |
367 | ||
eb1723a2 DV |
368 | if (pkt->extra_count != 0) |
369 | flags |= XEN_NETRXF_extra_info; | |
370 | } | |
3254f836 | 371 | |
eb1723a2 DV |
372 | rsp->offset = 0; |
373 | rsp->flags = flags; | |
374 | rsp->id = req->id; | |
375 | rsp->status = (s16)offset; | |
376 | } | |
3254f836 | 377 | |
eb1723a2 DV |
378 | static void xenvif_rx_extra_slot(struct xenvif_queue *queue, |
379 | struct xenvif_pkt_state *pkt, | |
380 | struct xen_netif_rx_request *req, | |
381 | struct xen_netif_rx_response *rsp) | |
382 | { | |
383 | struct xen_netif_extra_info *extra = (void *)rsp; | |
384 | unsigned int i; | |
3254f836 | 385 | |
eb1723a2 | 386 | pkt->extra_count--; |
3254f836 | 387 | |
eb1723a2 DV |
388 | for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) { |
389 | if (pkt->extras[i].type) { | |
390 | *extra = pkt->extras[i]; | |
3254f836 | 391 | |
eb1723a2 | 392 | if (pkt->extra_count != 0) |
3254f836 | 393 | extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; |
eb1723a2 DV |
394 | |
395 | pkt->extras[i].type = 0; | |
396 | return; | |
3254f836 | 397 | } |
eb1723a2 DV |
398 | } |
399 | BUG(); | |
400 | } | |
3254f836 | 401 | |
eb1723a2 DV |
402 | void xenvif_rx_action(struct xenvif_queue *queue) |
403 | { | |
404 | struct xenvif_pkt_state pkt; | |
3254f836 | 405 | |
eb1723a2 | 406 | xenvif_rx_next_skb(queue, &pkt); |
3254f836 | 407 | |
eb1723a2 DV |
408 | do { |
409 | struct xen_netif_rx_request *req; | |
410 | struct xen_netif_rx_response *rsp; | |
3254f836 | 411 | |
eb1723a2 DV |
412 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons); |
413 | rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons); | |
3254f836 | 414 | |
eb1723a2 DV |
415 | /* Extras must go after the first data slot */ |
416 | if (pkt.slot != 0 && pkt.extra_count != 0) | |
417 | xenvif_rx_extra_slot(queue, &pkt, req, rsp); | |
418 | else | |
419 | xenvif_rx_data_slot(queue, &pkt, req, rsp); | |
420 | ||
421 | queue->rx.req_cons++; | |
422 | pkt.slot++; | |
423 | } while (pkt.remaining_len > 0 || pkt.extra_count != 0); | |
424 | ||
425 | xenvif_rx_complete(queue, &pkt); | |
3254f836 PD |
426 | } |
427 | ||
428 | static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) | |
429 | { | |
430 | RING_IDX prod, cons; | |
431 | ||
432 | prod = queue->rx.sring->req_prod; | |
433 | cons = queue->rx.req_cons; | |
434 | ||
435 | return !queue->stalled && | |
436 | prod - cons < 1 && | |
437 | time_after(jiffies, | |
438 | queue->last_rx_time + queue->vif->stall_timeout); | |
439 | } | |
440 | ||
441 | static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) | |
442 | { | |
443 | RING_IDX prod, cons; | |
444 | ||
445 | prod = queue->rx.sring->req_prod; | |
446 | cons = queue->rx.req_cons; | |
447 | ||
448 | return queue->stalled && prod - cons >= 1; | |
449 | } | |
450 | ||
451 | static bool xenvif_have_rx_work(struct xenvif_queue *queue) | |
452 | { | |
453 | return xenvif_rx_ring_slots_available(queue) || | |
454 | (queue->vif->stall_timeout && | |
455 | (xenvif_rx_queue_stalled(queue) || | |
456 | xenvif_rx_queue_ready(queue))) || | |
457 | kthread_should_stop() || | |
458 | queue->vif->disabled; | |
459 | } | |
460 | ||
461 | static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) | |
462 | { | |
463 | struct sk_buff *skb; | |
464 | long timeout; | |
465 | ||
466 | skb = skb_peek(&queue->rx_queue); | |
467 | if (!skb) | |
468 | return MAX_SCHEDULE_TIMEOUT; | |
469 | ||
470 | timeout = XENVIF_RX_CB(skb)->expires - jiffies; | |
471 | return timeout < 0 ? 0 : timeout; | |
472 | } | |
473 | ||
474 | /* Wait until the guest Rx thread has work. | |
475 | * | |
476 | * The timeout needs to be adjusted based on the current head of the | |
477 | * queue (and not just the head at the beginning). In particular, if | |
478 | * the queue is initially empty an infinite timeout is used and this | |
479 | * needs to be reduced when a skb is queued. | |
480 | * | |
481 | * This cannot be done with wait_event_timeout() because it only | |
482 | * calculates the timeout once. | |
483 | */ | |
484 | static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) | |
485 | { | |
486 | DEFINE_WAIT(wait); | |
487 | ||
488 | if (xenvif_have_rx_work(queue)) | |
489 | return; | |
490 | ||
491 | for (;;) { | |
492 | long ret; | |
493 | ||
494 | prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); | |
495 | if (xenvif_have_rx_work(queue)) | |
496 | break; | |
497 | ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); | |
498 | if (!ret) | |
499 | break; | |
500 | } | |
501 | finish_wait(&queue->wq, &wait); | |
502 | } | |
503 | ||
504 | static void xenvif_queue_carrier_off(struct xenvif_queue *queue) | |
505 | { | |
506 | struct xenvif *vif = queue->vif; | |
507 | ||
508 | queue->stalled = true; | |
509 | ||
510 | /* At least one queue has stalled? Disable the carrier. */ | |
511 | spin_lock(&vif->lock); | |
512 | if (vif->stalled_queues++ == 0) { | |
513 | netdev_info(vif->dev, "Guest Rx stalled"); | |
514 | netif_carrier_off(vif->dev); | |
515 | } | |
516 | spin_unlock(&vif->lock); | |
517 | } | |
518 | ||
519 | static void xenvif_queue_carrier_on(struct xenvif_queue *queue) | |
520 | { | |
521 | struct xenvif *vif = queue->vif; | |
522 | ||
523 | queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ | |
524 | queue->stalled = false; | |
525 | ||
526 | /* All queues are ready? Enable the carrier. */ | |
527 | spin_lock(&vif->lock); | |
528 | if (--vif->stalled_queues == 0) { | |
529 | netdev_info(vif->dev, "Guest Rx ready"); | |
530 | netif_carrier_on(vif->dev); | |
531 | } | |
532 | spin_unlock(&vif->lock); | |
533 | } | |
534 | ||
535 | int xenvif_kthread_guest_rx(void *data) | |
536 | { | |
537 | struct xenvif_queue *queue = data; | |
538 | struct xenvif *vif = queue->vif; | |
539 | ||
540 | if (!vif->stall_timeout) | |
541 | xenvif_queue_carrier_on(queue); | |
542 | ||
543 | for (;;) { | |
544 | xenvif_wait_for_rx_work(queue); | |
545 | ||
546 | if (kthread_should_stop()) | |
547 | break; | |
548 | ||
549 | /* This frontend is found to be rogue, disable it in | |
550 | * kthread context. Currently this is only set when | |
551 | * netback finds out frontend sends malformed packet, | |
552 | * but we cannot disable the interface in softirq | |
553 | * context so we defer it here, if this thread is | |
554 | * associated with queue 0. | |
555 | */ | |
556 | if (unlikely(vif->disabled && queue->id == 0)) { | |
557 | xenvif_carrier_off(vif); | |
558 | break; | |
559 | } | |
560 | ||
561 | if (!skb_queue_empty(&queue->rx_queue)) | |
562 | xenvif_rx_action(queue); | |
563 | ||
564 | /* If the guest hasn't provided any Rx slots for a | |
565 | * while it's probably not responsive, drop the | |
566 | * carrier so packets are dropped earlier. | |
567 | */ | |
568 | if (vif->stall_timeout) { | |
569 | if (xenvif_rx_queue_stalled(queue)) | |
570 | xenvif_queue_carrier_off(queue); | |
571 | else if (xenvif_rx_queue_ready(queue)) | |
572 | xenvif_queue_carrier_on(queue); | |
573 | } | |
574 | ||
575 | /* Queued packets may have foreign pages from other | |
576 | * domains. These cannot be queued indefinitely as | |
577 | * this would starve guests of grant refs and transmit | |
578 | * slots. | |
579 | */ | |
580 | xenvif_rx_queue_drop_expired(queue); | |
581 | ||
3254f836 PD |
582 | cond_resched(); |
583 | } | |
584 | ||
585 | /* Bin any remaining skbs */ | |
586 | xenvif_rx_queue_purge(queue); | |
587 | ||
588 | return 0; | |
589 | } |