]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ifb.c
Merge tag 'iio-for-4.13b' of git://git.kernel.org/pub/scm/linux/kernel/git/jic23...
[mirror_ubuntu-artful-kernel.git] / drivers / net / ifb.c
CommitLineData
6aa20a22 1/* drivers/net/ifb.c:
253af423
JHS
2
3 The purpose of this driver is to provide a device that allows
4 for sharing of resources:
5
6 1) qdiscs/policies that are per device as opposed to system wide.
7 ifb allows for a device which can be redirected to thus providing
8 an impression of sharing.
9
10 2) Allows for queueing incoming traffic for shaping instead of
6aa20a22
JG
11 dropping.
12
253af423
JHS
13 The original concept is based on what is known as the IMQ
14 driver initially written by Martin Devera, later rewritten
15 by Patrick McHardy and then maintained by Andre Correa.
16
17 You need the tc action mirror or redirect to feed this device
18 packets.
19
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 as published by the Free Software Foundation; either version
23 2 of the License, or (at your option) any later version.
6aa20a22 24
253af423 25 Authors: Jamal Hadi Salim (2005)
6aa20a22 26
253af423
JHS
27*/
28
29
253af423
JHS
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/init.h>
a6b7a407 35#include <linux/interrupt.h>
253af423 36#include <linux/moduleparam.h>
6aa20a22 37#include <net/pkt_sched.h>
881d966b 38#include <net/net_namespace.h>
253af423 39
/* Per-queue backlog limit: ifb_xmit() stops the tx queue once the
 * receive backlog (rq) reaches this many packets.
 */
#define TX_Q_LIMIT 32

/* Per tx-queue state; one instance per netdev tx queue, allocated as an
 * array in ifb_dev_init(). Cacheline-aligned so queues on different CPUs
 * do not false-share.
 */
struct ifb_q_private {
	struct net_device	*dev;		/* back-pointer to the ifb netdev */
	struct tasklet_struct	ifb_tasklet;	/* drains rq/tq, see ifb_ri_tasklet() */
	int			tasklet_pending; /* 1 while a (re)schedule is in flight */
	int			txqnum;		/* index of this queue on dev */
	struct sk_buff_head	rq;		/* backlog filled by ifb_xmit() */
	u64			rx_packets;	/* counters seen on the xmit path */
	u64			rx_bytes;
	struct u64_stats_sync	rsync;		/* protects rx_* on 32-bit */

	struct u64_stats_sync	tsync;		/* protects tx_* on 32-bit */
	u64			tx_packets;	/* counters for re-injected skbs */
	u64			tx_bytes;
	struct sk_buff_head	tq;		/* queue drained by the tasklet */
} ____cacheline_aligned_in_smp;

/* Device private data: just the per-queue array (dev->num_tx_queues entries). */
struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
/* Per-queue tasklet body.
 *
 * Splices the backlog (rq, filled by ifb_xmit()) into the private work
 * queue (tq) under the tx queue lock, then re-injects every skb: packets
 * redirected from ingress go back up the stack via netif_receive_skb(),
 * all others go out through dev_queue_xmit(). Re-arms itself whenever
 * work remains or a lock could not be taken.
 */
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		/* tq empty: pull the whole backlog over in one splice.
		 * The tx lock serializes against ifb_xmit() enqueueing.
		 */
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Clear the redirect mark and skip classification on
		 * re-injection so the packet is not redirected back here.
		 */
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		/* Restore the originating device from skb_iif; it may have
		 * disappeared while the packet sat in the queue.
		 */
		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			/* No more backlog: go idle and wake the queue if
			 * ifb_xmit() had stopped it at TX_Q_LIMIT.
			 */
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
128
bc1f4470 129static void ifb_stats64(struct net_device *dev,
130 struct rtnl_link_stats64 *stats)
3b0c9cbb 131{
9e29e21a
ED
132 struct ifb_dev_private *dp = netdev_priv(dev);
133 struct ifb_q_private *txp = dp->tx_private;
3b0c9cbb 134 unsigned int start;
9e29e21a
ED
135 u64 packets, bytes;
136 int i;
137
138 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
139 do {
140 start = u64_stats_fetch_begin_irq(&txp->rsync);
141 packets = txp->rx_packets;
142 bytes = txp->rx_bytes;
143 } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
144 stats->rx_packets += packets;
145 stats->rx_bytes += bytes;
146
147 do {
148 start = u64_stats_fetch_begin_irq(&txp->tsync);
149 packets = txp->tx_packets;
150 bytes = txp->tx_bytes;
151 } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
152 stats->tx_packets += packets;
153 stats->tx_bytes += bytes;
154 }
3b0c9cbb 155 stats->rx_dropped = dev->stats.rx_dropped;
156 stats->tx_dropped = dev->stats.tx_dropped;
3b0c9cbb 157}
158
9e29e21a
ED
159static int ifb_dev_init(struct net_device *dev)
160{
161 struct ifb_dev_private *dp = netdev_priv(dev);
162 struct ifb_q_private *txp;
163 int i;
164
165 txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
166 if (!txp)
167 return -ENOMEM;
168 dp->tx_private = txp;
169 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
170 txp->txqnum = i;
171 txp->dev = dev;
172 __skb_queue_head_init(&txp->rq);
173 __skb_queue_head_init(&txp->tq);
174 u64_stats_init(&txp->rsync);
175 u64_stats_init(&txp->tsync);
176 tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
177 (unsigned long)txp);
178 netif_tx_start_queue(netdev_get_tx_queue(dev, i));
179 }
180 return 0;
181}
3b0c9cbb 182
/* netdev operations for the ifb device. */
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};
191
/* Feature set advertised by ifb: checksum/SG/TSO and VLAN tx offloads
 * are claimed so redirected skbs need not be segmented or checksummed
 * just to pass through this device.
 */
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
		      NETIF_F_GSO_ENCAP_ALL 				| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
		      NETIF_F_HW_VLAN_STAG_TX)
9e29e21a
ED
198static void ifb_dev_free(struct net_device *dev)
199{
200 struct ifb_dev_private *dp = netdev_priv(dev);
201 struct ifb_q_private *txp = dp->tx_private;
202 int i;
203
204 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
205 tasklet_kill(&txp->ifb_tasklet);
206 __skb_queue_purge(&txp->rq);
207 __skb_queue_purge(&txp->tq);
208 }
209 kfree(dp->tx_private);
9e29e21a
ED
210}
211
/* rtnl_link_ops .setup: configure an ifb net_device before registration. */
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	/* VLAN tx offload flags are excluded from vlan_features. */
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	/* Pure software redirect target: no ARP, no multicast. */
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	/* Core frees the netdev; ifb_dev_free() runs first as destructor. */
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;
}
235
424efe9c 236static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
253af423 237{
9e29e21a 238 struct ifb_dev_private *dp = netdev_priv(dev);
9e29e21a 239 struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
253af423 240
9e29e21a
ED
241 u64_stats_update_begin(&txp->rsync);
242 txp->rx_packets++;
243 txp->rx_bytes += skb->len;
244 u64_stats_update_end(&txp->rsync);
253af423 245
bc31c905 246 if (!skb->tc_redirected || !skb->skb_iif) {
253af423 247 dev_kfree_skb(skb);
3b0c9cbb 248 dev->stats.rx_dropped++;
424efe9c 249 return NETDEV_TX_OK;
253af423
JHS
250 }
251
9e29e21a
ED
252 if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
253 netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
253af423 254
9e29e21a
ED
255 __skb_queue_tail(&txp->rq, skb);
256 if (!txp->tasklet_pending) {
257 txp->tasklet_pending = 1;
258 tasklet_schedule(&txp->ifb_tasklet);
253af423
JHS
259 }
260
424efe9c 261 return NETDEV_TX_OK;
253af423
JHS
262}
263
253af423
JHS
/* .ndo_stop: stop all tx queues; queued skbs are purged at destruction. */
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* .ndo_open: start all tx queues so redirected traffic can flow. */
static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
275
0e06877c
PM
276static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
277{
278 if (tb[IFLA_ADDRESS]) {
279 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
280 return -EINVAL;
281 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
282 return -EADDRNOTAVAIL;
283 }
284 return 0;
285}
286
9ba2cd65
PM
/* rtnetlink glue: "ip link add ... type ifb" creates devices here. */
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
293
9e29e21a
ED
/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
301
253af423
JHS
302static int __init ifb_init_one(int index)
303{
304 struct net_device *dev_ifb;
305 int err;
306
9e29e21a 307 dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
c835a677 308 NET_NAME_UNKNOWN, ifb_setup);
253af423
JHS
309
310 if (!dev_ifb)
311 return -ENOMEM;
312
9ba2cd65
PM
313 dev_ifb->rtnl_link_ops = &ifb_link_ops;
314 err = register_netdevice(dev_ifb);
315 if (err < 0)
316 goto err;
94833dfb 317
9ba2cd65 318 return 0;
62b7ffca 319
9ba2cd65
PM
320err:
321 free_netdev(dev_ifb);
322 return err;
6aa20a22 323}
253af423
JHS
324
325static int __init ifb_init_module(void)
6aa20a22 326{
9ba2cd65
PM
327 int i, err;
328
329 rtnl_lock();
330 err = __rtnl_link_register(&ifb_link_ops);
f2966cd5 331 if (err < 0)
332 goto out;
62b7ffca 333
440d57bc 334 for (i = 0; i < numifbs && !err; i++) {
6aa20a22 335 err = ifb_init_one(i);
440d57bc 336 cond_resched();
337 }
2d85cba2 338 if (err)
9ba2cd65 339 __rtnl_link_unregister(&ifb_link_ops);
f2966cd5 340
341out:
9ba2cd65 342 rtnl_unlock();
253af423
JHS
343
344 return err;
6aa20a22 345}
253af423
JHS
346
/* Module exit: unregistering the link ops also destroys all ifb devices. */
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
351
352module_init(ifb_init_module);
353module_exit(ifb_cleanup_module);
354MODULE_LICENSE("GPL");
355MODULE_AUTHOR("Jamal Hadi Salim");
9ba2cd65 356MODULE_ALIAS_RTNL_LINK("ifb");