/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

static inline void xenvif_stop_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;

        if (!queue->vif->can_queue)
                return;

        netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
}

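/* A vif may be handed packets only while its net_device is administratively
 * up and carrier is on.
 */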
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

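/* TX event channel handler: the frontend has posted transmit requests, so
 * schedule NAPI to process the TX ring.
 */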
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

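/* NAPI poll handler: consume up to budget guest TX requests from the ring. */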
int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue, so we pretend there is nothing to do
         * for this vif in order to deschedule it from NAPI. The
         * interface will be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

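/* RX event channel handler: wake the per-queue RX kernel thread. */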
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

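/* Combined handler, used when the frontend shares a single event channel
 * for TX and RX (feature-split-event-channels == 0).
 */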
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

/* Callback to wake the queue and drain it on timeout */
static void xenvif_wake_queue_callback(unsigned long data)
{
        struct xenvif_queue *queue = (struct xenvif_queue *)data;

        if (xenvif_queue_stopped(queue)) {
                netdev_err(queue->vif->dev, "draining TX queue\n");
                queue->rx_queue_purge = true;
                xenvif_kick_thread(queue);
                xenvif_wake_queue(queue);
        }
}

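/* Transmit entry point: pick the target queue, check that it is ready, and
 * hand the skb to that queue's RX kernel thread (the guest receive path).
 */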
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_is_gso(skb))
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
                queue->wake_queue.function = xenvif_wake_queue_callback;
                queue->wake_queue.data = (unsigned long)queue;
                xenvif_stop_queue(queue);
                mod_timer(&queue->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&queue->rx_queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

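/* Aggregate the per-queue counters into the device-wide statistics. */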
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

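/* Bring all queues up: enable NAPI and the TX/RX interrupts for each queue.
 * xenvif_down() below does the reverse and also stops the credit timers.
 */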
static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_disable(&queue->napi);
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

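/* Mask out features the frontend has not negotiated (SG, TSO, checksum
 * offloads).
 */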
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

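/* Sum each ethtool statistic across all queues; the offsets in
 * xenvif_stats[] are byte offsets into struct xenvif_stats.
 */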
static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 * data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;
        struct xenvif_stats *vif_stats;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)((void *)vif_stats +
                                                    xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit      = xenvif_start_xmit,
        .ndo_get_stats       = xenvif_get_stats,
        .ndo_open            = xenvif_open,
        .ndo_stop            = xenvif_close,
        .ndo_change_mtu      = xenvif_change_mtu,
        .ndo_fix_features    = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
};

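/* Allocate and register the netdev for a new vif. Queues are allocated
 * separately, once the frontend has negotiated how many it wants.
 */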
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

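/* Initialise per-queue state: credit scheduler, pending ring, zerocopy
 * callbacks, ballooned pages for grant mappings and the NAPI instance.
 */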
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec  = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_window_start = get_jiffies_64();

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * better enable it. The long term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       queue->mmap_pages,
                                       false);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        init_timer(&queue->wake_queue);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

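/* Connect a queue to the frontend: map the shared TX/RX rings, bind the
 * event channel(s) as IRQs and start the guest-rx and dealloc kthreads.
 */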
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(queue);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

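/* Wait for all granted TX pages of a queue to be unmapped, complaining
 * (rate-limited) once they stay granted longer than the worst-case skb
 * lifetime.
 */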
static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
                                      unsigned int worst_case_skb_lifetime)
{
        int i, unmap_timeout = 0;

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(queue->vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        i = -1;
                }
        }
}

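/* Disconnect from the frontend: stop the kthreads, unbind the IRQs and
 * unmap the shared rings for every queue. The vif itself is freed later
 * by xenvif_free().
 */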
void xenvif_disconnect(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                if (queue->task) {
                        del_timer_sync(&queue->wake_queue);
                        kthread_stop(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq)
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_rings(queue);
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
        netif_napi_del(&queue->napi);
}

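/* Final teardown: wait for any still-granted TX pages, release per-queue
 * resources and free the netdev.
 */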
void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
        /* Here we want to avoid timeout messages if an skb can be legitimately
         * stuck somewhere else. Realistically this could be another vif's
         * internal or QDisc queue. That other vif also has this
         * rx_drain_timeout_msecs timeout, but the timer only ditches the
         * internal queue. After that, the QDisc queue can put in worst case
         * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
         * internal queue, so we need several rounds of such timeouts until we
         * can be sure that no other vif still holds skbs from us. We are
         * not sending more skbs, so newly stuck packets are not interesting
         * to us here.
         */
        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

        unregister_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
                xenvif_deinit_queue(queue);
        }

        vfree(vif->queues);
        vif->queues = NULL;
        vif->num_queues = 0;

        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}