/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

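/* Stop the netdev TX queue backing this vif queue, but only for
 * frontends that can cope with the backend queueing packets
 * (vif->can_queue); for others the packet is dropped instead.
 */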
static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;

	if (!queue->vif->can_queue)
		return;

	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

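/* A vif is schedulable (i.e. may carry traffic) only while its
 * net_device is up and the carrier is on.
 */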
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

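/* TX event-channel interrupt: if the frontend has placed new requests
 * on the TX ring, schedule NAPI to process them.
 */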
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

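/* NAPI poll handler: consume up to @budget requests from the TX ring.
 * Returning less than @budget completes the NAPI cycle and re-enables
 * ring event notifications.
 */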
int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so pretend there is nothing to do for
	 * this vif in order to deschedule it from NAPI. The interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

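/* RX event-channel interrupt: wake the per-queue RX kthread, which
 * pushes queued skbs to the frontend.
 */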
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

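/* Combined handler, used when the frontend shares a single event
 * channel for both TX and RX (feature-split-event-channels == 0).
 */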
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

/* Callback to wake the queue and drain it on timeout */
static void xenvif_wake_queue_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	if (xenvif_queue_stopped(queue)) {
		netdev_err(queue->vif->dev, "draining TX queue\n");
		queue->rx_queue_purge = true;
		xenvif_kick_thread(queue);
		xenvif_wake_queue(queue);
	}
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_index;

	/* First, check if there is only one queue to optimise the
	 * single-queue or old frontend scenario.
	 */
	if (num_queues == 1) {
		queue_index = 0;
	} else {
		/* Use skb_get_hash to obtain an L4 hash if available */
		hash = skb_get_hash(skb);
		queue_index = hash % num_queues;
	}

	return queue_index;
}

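/* ndo_start_xmit: map the skb to a queue, make sure the RX ring can
 * take it (stopping the queue on a timer if not), then hand it to the
 * per-queue kthread for delivery to the frontend.
 */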
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 index;
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
		queue->wake_queue.function = xenvif_wake_queue_callback;
		queue->wake_queue.data = (unsigned long)queue;
		xenvif_stop_queue(queue);
		mod_timer(&queue->wake_queue,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_disable(&queue->napi);
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

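/* ndo_fix_features: mask out offloads (SG, TSO, checksum) that the
 * connected frontend did not negotiate.
 */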
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	int i;
	unsigned int queue_index;
	struct xenvif_stats *vif_stats;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			vif_stats = &vif->queues[queue_index].stats;
			/* The offset is in bytes, so index via a byte
			 * pointer rather than a struct pointer.
			 */
			accum += *(unsigned long *)((void *)vif_stats +
						    xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_select_queue = xenvif_select_queue,
};

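/* Allocate and register the net_device for a new vif. Queues are added
 * later, once the frontend tells us how many it wants.
 */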
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 1);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;

	/* Start out with no queues. The call below does not require
	 * rtnl_lock() as it happens before register_netdev().
	 */
	vif->queues = NULL;
	netif_set_real_num_tx_queues(dev, 0);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

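/* Initialise the per-queue state that does not depend on the frontend:
 * credit scheduler, skb queues, pending ring, zerocopy callbacks and
 * the ballooned pages used to map foreign TX buffers.
 */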
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_window_start = get_jiffies_64();

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so
	 * you had better enable it. The long-term solution would be to
	 * use just a bunch of valid page descriptors, without dependency
	 * on ballooning.
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       queue->mmap_pages,
				       false);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	init_timer(&queue->wake_queue);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       XENVIF_NAPI_WEIGHT);

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

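/* Connect a queue to its frontend: map the shared TX/RX rings, bind
 * the event channel(s) (one shared, or split TX/RX), and start the
 * guest-rx and dealloc kthreads.
 */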
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(queue);
err:
	module_put(THIS_MODULE);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	netif_carrier_off(dev); /* discard queued packets */
	if (netif_running(dev))
		xenvif_down(vif);
	rtnl_unlock();
}

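/* Wait (one second per round) until every grant mapping for this queue
 * has been released, warning once the wait exceeds the worst-case skb
 * lifetime. The scan restarts (i = -1) after each sleep.
 */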
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
				      unsigned int worst_case_skb_lifetime)
{
	int i, unmap_timeout = 0;

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			i = -1;
		}
	}
}

void xenvif_disconnect(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;

	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		if (queue->task) {
			del_timer_sync(&queue->wake_queue);
			kthread_stop(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_rings(queue);
	}
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->dev->real_num_tx_queues;
	unsigned int queue_index;
	/* Here we want to avoid timeout messages if an skb can be
	 * legitimately stuck somewhere else. Realistically this could be
	 * another vif's internal or QDisc queue. That other vif also has
	 * this rx_drain_timeout_msecs timeout, but the timer only ditches
	 * its internal queue. After that, the QDisc queue can put in worst
	 * case XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other
	 * vif's internal queue, so we need several rounds of such timeouts
	 * until we can be sure that no other vif still holds skbs from us.
	 * We are not sending more skbs, so newly stuck packets are not
	 * interesting for us here.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

	unregister_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
		free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);

		netif_napi_del(&queue->napi);
	}

	/* Free the array of queues. The call below does not require
	 * rtnl_lock() because it happens after unregister_netdev().
	 */
	netif_set_real_num_tx_queues(vif->dev, 0);
	vfree(vif->queues);
	vif->queues = NULL;

	free_netdev(vif->dev);

	module_put(THIS_MODULE);
}