/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
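
/*
 * Illustrative sketch (not part of the original file): how a module
 * might hook the receive path with dev_add_pack()/dev_remove_pack().
 * The names example_rcv/example_pt are hypothetical. A handler gets a
 * shared reference to the skb and must release it when done.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect the packet here; an ETH_P_ALL tap sees every protocol */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),	/* or a specific type, e.g. ETH_P_IP */
	.func = example_rcv,
};

/* module init: dev_add_pack(&example_pt);
 * module exit: dev_remove_pack(&example_pt);  (sleeps, see above)
 */
#endif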

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
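
/*
 * Illustrative example (not in the original file): with the parsing
 * above, booting with
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * stores irq=9 and base_addr=0x300 under the name "eth0";
 * netdev_boot_setup_check() later copies those values into the
 * matching struct net_device while the driver probes it.
 */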

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
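
/*
 * Illustrative sketch (not part of the original file): the three name
 * lookup flavours above differ only in locking and refcounting. A
 * hypothetical caller that holds no locks would use the refcounted
 * variant and balance it with dev_put():
 */
#if 0
static int example_use_device(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return -ENODEV;
	/* ... use dev; the held reference keeps it from being freed ... */
	dev_put(dev);
	return 0;
}
#endif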

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold RCU.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
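
/*
 * Illustrative examples (not in the original file) of the checks above:
 *
 *	dev_valid_name("eth0")    -> 1
 *	dev_valid_name("eth/0")   -> 0	('/' would break the sysfs path)
 *	dev_valid_name("my eth")  -> 0	(whitespace is rejected)
 *	dev_valid_name(".")       -> 0	(reserved directory name)
 */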

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
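
/*
 * Illustrative sketch (not part of the original file): a driver
 * registering devices under a hypothetical "foo%d" template. If foo0
 * and foo2 already exist, the bitmap scan above yields the lowest free
 * unit, here 1, and dev->name becomes "foo1".
 */
#if 0
	err = dev_alloc_name(dev, "foo%d");
	if (err < 0)
		goto out_free;	/* -EINVAL, -ENOMEM or -ENFILE */
#endif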

static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
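
/*
 * Illustrative sketch (not part of the original file): dev_open() and
 * dev_close() expect the RTNL lock, as the ASSERT_RTNL() in their
 * helpers enforces. A hypothetical in-kernel caller:
 */
#if 0
	rtnl_lock();
	err = dev_open(dev);	/* nop if the interface is already IFF_UP */
	if (err)
		printk(KERN_WARNING "could not bring %s up: %d\n",
		       dev->name, err);
	/* ... later, still under RTNL ... */
	dev_close(dev);		/* always returns 0 */
	rtnl_unlock();
#endif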

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		/*
		 *	Tell people we are going down, so that they can
		 *	prepare for death while the device is still operating.
		 */
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		/*
		 *	Device is now down.
		 */

		dev->flags &= ~IFF_UP;

		/*
		 *	Shutdown NET_DMA
		 */
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	return __dev_close_many(&single);
}

int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	/*
	 *	Tell people we are down
	 */
	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_close_many(&single);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
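
/*
 * Illustrative sketch (not part of the original file): a minimal
 * netdev notifier; the names example_* are hypothetical. Note the
 * replay described above: devices that already exist are reported as
 * NETDEV_REGISTER (and NETDEV_UP) at registration time. In this
 * kernel the callback's third argument is the net_device itself.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_INFO "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		printk(KERN_INFO "%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_notifier); */
#endif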

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
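
/*
 * Illustrative sketch (not part of the original file): the classic
 * user of dev_forward_skb() is a pair device such as veth, whose
 * start_xmit hands the skb straight to the peer's receive path.
 * example_get_peer() is hypothetical; the skb is consumed either way.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	return NETDEV_TX_OK;
}
#endif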

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
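
/*
 * Illustrative sketch (not part of the original file; names and counts
 * are hypothetical): a multiqueue driver allocates the maximum number
 * of queues it could ever use, then trims to what the hardware
 * actually enabled. Before register_netdev() no RTNL is required,
 * since reg_state is not yet NETREG_REGISTERED.
 */
#if 0
	dev = alloc_etherdev_mq(sizeof(struct example_priv), 8);
	/* probe discovers only 4 usable TX rings */
	netif_set_real_num_tx_queues(dev, 4);
#endif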

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	int features = dev->features;

	if (vlan_tx_tag_present(skb)) {
		features &= dev->vlan_features;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		features &= dev->vlan_features;
	}

	return can_checksum_protocol(features, protocol);
}
56079431 1761
8a83a00b
AB
1762/**
1763 * skb_set_dev - assign a new device to a buffer
1764 * @skb: buffer for the new device
1765 * @dev: network device
1766 *
1767 * If an skb is already owned by a device, we have to reset
1768 * all data private to the namespace that device belongs to
1769 * before assigning the skb to a new device.
1770 */
1771#ifdef CONFIG_NET_NS
1772void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1773{
1774 skb_dst_drop(skb);
1775 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1776 secpath_reset(skb);
1777 nf_reset(skb);
1778 skb_init_secmark(skb);
1779 skb->mark = 0;
1780 skb->priority = 0;
1781 skb->nf_trace = 0;
1782 skb->ipvs_property = 0;
1783#ifdef CONFIG_NET_SCHED
1784 skb->tc_index = 0;
1785#endif
1786 }
1787 skb->dev = dev;
1788}
1789EXPORT_SYMBOL(skb_set_dev);
1790#endif /* CONFIG_NET_NS */
1791
1da177e4
LT
1792/*
1793 * Invalidate the hardware checksum when a packet is to be mangled, and
1794 * complete the checksum manually on the outgoing path.
1795 */
84fa7933 1796int skb_checksum_help(struct sk_buff *skb)
1da177e4 1797{
d3bc23e7 1798 __wsum csum;
663ead3b 1799 int ret = 0, offset;
1da177e4 1800
84fa7933 1801 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
1802 goto out_set_summed;
1803
1804 if (unlikely(skb_shinfo(skb)->gso_size)) {
a430a43d
HX
1805 /* Let GSO fix up the checksum. */
1806 goto out_set_summed;
1da177e4
LT
1807 }
1808
55508d60 1809 offset = skb_checksum_start_offset(skb);
a030847e
HX
1810 BUG_ON(offset >= skb_headlen(skb));
1811 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1812
1813 offset += skb->csum_offset;
1814 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1815
1816 if (skb_cloned(skb) &&
1817 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
1818 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1819 if (ret)
1820 goto out;
1821 }
1822
a030847e 1823 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 1824out_set_summed:
1da177e4 1825 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 1826out:
1da177e4
LT
1827 return ret;
1828}
d1b19dff 1829EXPORT_SYMBOL(skb_checksum_help);
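
/*
 * Example (illustrative sketch, not part of dev.c): an ndo_start_xmit()
 * implementation for hardware that cannot checksum a particular packet can
 * fall back to skb_checksum_help(), mirroring what dev_hard_start_xmit()
 * does below for devices lacking the needed NETIF_F_*_CSUM feature bits.
 * example_can_offload_csum() is hypothetical.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_can_offload_csum(skb) &&
	    skb_checksum_help(skb))
		goto drop;

	/* ... hand the (now fully checksummed) skb to the hardware ... */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}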
1da177e4 1830
f6a78bfc
HX
1831/**
1832 * skb_gso_segment - Perform segmentation on skb.
1833 * @skb: buffer to segment
576a30eb 1834 * @features: features for the output path (see dev->features)
f6a78bfc
HX
1835 *
1836 * This function segments the given skb and returns a list of segments.
576a30eb
HX
1837 *
1838 * It may return NULL if the skb requires no segmentation. This is
1839 * only possible when GSO is used for verifying header integrity.
f6a78bfc 1840 */
576a30eb 1841struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
f6a78bfc
HX
1842{
1843 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1844 struct packet_type *ptype;
252e3346 1845 __be16 type = skb->protocol;
c8d5bcd1 1846 int vlan_depth = ETH_HLEN;
a430a43d 1847 int err;
f6a78bfc 1848
c8d5bcd1
JG
1849 while (type == htons(ETH_P_8021Q)) {
1850 struct vlan_hdr *vh;
7b9c6090 1851
c8d5bcd1 1852 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
7b9c6090
JG
1853 return ERR_PTR(-EINVAL);
1854
c8d5bcd1
JG
1855 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1856 type = vh->h_vlan_encapsulated_proto;
1857 vlan_depth += VLAN_HLEN;
7b9c6090
JG
1858 }
1859
459a98ed 1860 skb_reset_mac_header(skb);
b0e380b1 1861 skb->mac_len = skb->network_header - skb->mac_header;
f6a78bfc
HX
1862 __skb_pull(skb, skb->mac_len);
1863
67fd1a73
HX
1864 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1865 struct net_device *dev = skb->dev;
1866 struct ethtool_drvinfo info = {};
1867
1868 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1869 dev->ethtool_ops->get_drvinfo(dev, &info);
1870
b194a367 1871 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
67fd1a73
HX
1872 info.driver, dev ? dev->features : 0L,
1873 skb->sk ? skb->sk->sk_route_caps : 0L,
1874 skb->len, skb->data_len, skb->ip_summed);
1875
a430a43d
HX
1876 if (skb_header_cloned(skb) &&
1877 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1878 return ERR_PTR(err);
1879 }
1880
f6a78bfc 1881 rcu_read_lock();
82d8a867
PE
1882 list_for_each_entry_rcu(ptype,
1883 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
f6a78bfc 1884 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
84fa7933 1885 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
a430a43d
HX
1886 err = ptype->gso_send_check(skb);
1887 segs = ERR_PTR(err);
1888 if (err || skb_gso_ok(skb, features))
1889 break;
d56f90a7
ACM
1890 __skb_push(skb, (skb->data -
1891 skb_network_header(skb)));
a430a43d 1892 }
576a30eb 1893 segs = ptype->gso_segment(skb, features);
f6a78bfc
HX
1894 break;
1895 }
1896 }
1897 rcu_read_unlock();
1898
98e399f8 1899 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 1900
f6a78bfc
HX
1901 return segs;
1902}
f6a78bfc
HX
1903EXPORT_SYMBOL(skb_gso_segment);
1904
fb286bb2
HX
1905/* Take action when hardware reception checksum errors are detected. */
1906#ifdef CONFIG_BUG
1907void netdev_rx_csum_fault(struct net_device *dev)
1908{
1909 if (net_ratelimit()) {
4ec93edb 1910 printk(KERN_ERR "%s: hw csum failure.\n",
246a4212 1911 dev ? dev->name : "<unknown>");
fb286bb2
HX
1912 dump_stack();
1913 }
1914}
1915EXPORT_SYMBOL(netdev_rx_csum_fault);
1916#endif
1917
1da177e4
LT
1918/* Actually, we should eliminate this check as soon as we know that:
1919 * 1. An IOMMU is present and can map all of memory.
1920 * 2. No high memory really exists on this machine.
1921 */
1922
9092c658 1923static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 1924{
3d3a8533 1925#ifdef CONFIG_HIGHMEM
1da177e4 1926 int i;
5acbbd42
FT
1927 if (!(dev->features & NETIF_F_HIGHDMA)) {
1928 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1929 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1930 return 1;
1931 }
1da177e4 1932
5acbbd42
FT
1933 if (PCI_DMA_BUS_IS_PHYS) {
1934 struct device *pdev = dev->dev.parent;
1da177e4 1935
9092c658
ED
1936 if (!pdev)
1937 return 0;
5acbbd42
FT
1938 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1939 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1940 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1941 return 1;
1942 }
1943 }
3d3a8533 1944#endif
1da177e4
LT
1945 return 0;
1946}
1da177e4 1947
f6a78bfc
HX
1948struct dev_gso_cb {
1949 void (*destructor)(struct sk_buff *skb);
1950};
1951
1952#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1953
1954static void dev_gso_skb_destructor(struct sk_buff *skb)
1955{
1956 struct dev_gso_cb *cb;
1957
1958 do {
1959 struct sk_buff *nskb = skb->next;
1960
1961 skb->next = nskb->next;
1962 nskb->next = NULL;
1963 kfree_skb(nskb);
1964 } while (skb->next);
1965
1966 cb = DEV_GSO_CB(skb);
1967 if (cb->destructor)
1968 cb->destructor(skb);
1969}
1970
1971/**
1972 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1973 * @skb: buffer to segment
1974 *
1975 * This function segments the given skb and stores the list of segments
1976 * in skb->next.
1977 */
1978static int dev_gso_segment(struct sk_buff *skb)
1979{
1980 struct net_device *dev = skb->dev;
1981 struct sk_buff *segs;
576a30eb
HX
1982 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1983 NETIF_F_SG : 0);
1984
1985 segs = skb_gso_segment(skb, features);
1986
1987 /* Verifying header integrity only. */
1988 if (!segs)
1989 return 0;
f6a78bfc 1990
801678c5 1991 if (IS_ERR(segs))
f6a78bfc
HX
1992 return PTR_ERR(segs);
1993
1994 skb->next = segs;
1995 DEV_GSO_CB(skb)->destructor = skb->destructor;
1996 skb->destructor = dev_gso_skb_destructor;
1997
1998 return 0;
1999}
2000
fc6055a5
ED
2001/*
2002 * Try to orphan skb early, right before transmission by the device.
2244d07b
OH
2003 * We cannot orphan the skb if a tx timestamp is requested or the sk
2004 * reference is needed at the driver level for other reasons, e.g. see net/can/raw.c
fc6055a5
ED
2005 */
2006static inline void skb_orphan_try(struct sk_buff *skb)
2007{
87fd308c
ED
2008 struct sock *sk = skb->sk;
2009
2244d07b 2010 if (sk && !skb_shinfo(skb)->tx_flags) {
87fd308c
ED
2011 /* skb_tx_hash() won't be able to get the sk,
2012 * so we copy sk_hash into skb->rxhash.
2013 */
2014 if (!skb->rxhash)
2015 skb->rxhash = sk->sk_hash;
fc6055a5 2016 skb_orphan(skb);
87fd308c 2017 }
fc6055a5
ED
2018}
2019
58e998c6
JG
2020int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
2021{
2022 __be16 protocol = skb->protocol;
2023
2024 if (protocol == htons(ETH_P_8021Q)) {
2025 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2026 protocol = veh->h_vlan_encapsulated_proto;
2027 } else if (!skb->vlan_tci)
2028 return dev->features;
2029
2030 if (protocol != htons(ETH_P_8021Q))
2031 return dev->features & dev->vlan_features;
2032 else
2033 return 0;
2034}
6b353088 2035EXPORT_SYMBOL(netif_get_vlan_features);
58e998c6 2036
6afff0ca
JF
2037/*
2038 * Returns true if either:
2039 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2040 * 2. skb is fragmented and the device does not support SG, or if
2041 * at least one of the fragments is in highmem and the device does not
2042 * support DMA from it.
2043 */
2044static inline int skb_needs_linearize(struct sk_buff *skb,
2045 struct net_device *dev)
2046{
e1e78db6
JG
2047 if (skb_is_nonlinear(skb)) {
2048 int features = dev->features;
7b9c6090 2049
e1e78db6
JG
2050 if (vlan_tx_tag_present(skb))
2051 features &= dev->vlan_features;
7b9c6090 2052
e1e78db6
JG
2053 return (skb_has_frag_list(skb) &&
2054 !(features & NETIF_F_FRAGLIST)) ||
2055 (skb_shinfo(skb)->nr_frags &&
2056 (!(features & NETIF_F_SG) ||
2057 illegal_highdma(dev, skb)));
2058 }
2059
2060 return 0;
6afff0ca
JF
2061}
2062
fd2ea0a7
DM
2063int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2064 struct netdev_queue *txq)
f6a78bfc 2065{
00829823 2066 const struct net_device_ops *ops = dev->netdev_ops;
572a9d7b 2067 int rc = NETDEV_TX_OK;
00829823 2068
f6a78bfc 2069 if (likely(!skb->next)) {
93f154b5
ED
2070 /*
2071 * If the device doesn't need skb->dst, release it right now while
2072 * it's hot in this CPU's cache.
2073 */
adf30907
ED
2074 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2075 skb_dst_drop(skb);
2076
15c2d75f
ED
2077 if (!list_empty(&ptype_all))
2078 dev_queue_xmit_nit(skb, dev);
2079
fc6055a5 2080 skb_orphan_try(skb);
9ccb8975 2081
7b9c6090
JG
2082 if (vlan_tx_tag_present(skb) &&
2083 !(dev->features & NETIF_F_HW_VLAN_TX)) {
2084 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2085 if (unlikely(!skb))
2086 goto out;
2087
2088 skb->vlan_tci = 0;
2089 }
2090
9ccb8975
DM
2091 if (netif_needs_gso(dev, skb)) {
2092 if (unlikely(dev_gso_segment(skb)))
2093 goto out_kfree_skb;
2094 if (skb->next)
2095 goto gso;
6afff0ca
JF
2096 } else {
2097 if (skb_needs_linearize(skb, dev) &&
2098 __skb_linearize(skb))
2099 goto out_kfree_skb;
2100
2101 /* If packet is not checksummed and device does not
2102 * support checksumming for this protocol, complete
2103 * checksumming here.
2104 */
2105 if (skb->ip_summed == CHECKSUM_PARTIAL) {
55508d60
MM
2106 skb_set_transport_header(skb,
2107 skb_checksum_start_offset(skb));
6afff0ca
JF
2108 if (!dev_can_checksum(dev, skb) &&
2109 skb_checksum_help(skb))
2110 goto out_kfree_skb;
2111 }
9ccb8975
DM
2112 }
2113
ac45f602 2114 rc = ops->ndo_start_xmit(skb, dev);
cf66ba58 2115 trace_net_dev_xmit(skb, rc);
ec634fe3 2116 if (rc == NETDEV_TX_OK)
08baf561 2117 txq_trans_update(txq);
ac45f602 2118 return rc;
f6a78bfc
HX
2119 }
2120
576a30eb 2121gso:
f6a78bfc
HX
2122 do {
2123 struct sk_buff *nskb = skb->next;
f6a78bfc
HX
2124
2125 skb->next = nskb->next;
2126 nskb->next = NULL;
068a2de5
KK
2127
2128 /*
2129 * If the device doesn't need nskb->dst, release it right now while
2130 * it's hot in this CPU's cache.
2131 */
2132 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2133 skb_dst_drop(nskb);
2134
00829823 2135 rc = ops->ndo_start_xmit(nskb, dev);
cf66ba58 2136 trace_net_dev_xmit(nskb, rc);
ec634fe3 2137 if (unlikely(rc != NETDEV_TX_OK)) {
572a9d7b
PM
2138 if (rc & ~NETDEV_TX_MASK)
2139 goto out_kfree_gso_skb;
f54d9e8d 2140 nskb->next = skb->next;
f6a78bfc
HX
2141 skb->next = nskb;
2142 return rc;
2143 }
08baf561 2144 txq_trans_update(txq);
fd2ea0a7 2145 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
f54d9e8d 2146 return NETDEV_TX_BUSY;
f6a78bfc 2147 } while (skb->next);
4ec93edb 2148
572a9d7b
PM
2149out_kfree_gso_skb:
2150 if (likely(skb->next == NULL))
2151 skb->destructor = DEV_GSO_CB(skb)->destructor;
f6a78bfc
HX
2152out_kfree_skb:
2153 kfree_skb(skb);
7b9c6090 2154out:
572a9d7b 2155 return rc;
f6a78bfc
HX
2156}
2157
0a9627f2 2158static u32 hashrnd __read_mostly;
b6b2fed1 2159
a3d22a68
VZ
2160/*
2161 * Returns a Tx hash for the given packet descriptor, with the number of
2162 * Tx queues used as the distribution range.
2163 */
2164u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2165 unsigned int num_tx_queues)
8f0f2223 2166{
7019298a 2167 u32 hash;
b6b2fed1 2168
513de11b
DM
2169 if (skb_rx_queue_recorded(skb)) {
2170 hash = skb_get_rx_queue(skb);
a3d22a68
VZ
2171 while (unlikely(hash >= num_tx_queues))
2172 hash -= num_tx_queues;
513de11b
DM
2173 return hash;
2174 }
ec581f6a
ED
2175
2176 if (skb->sk && skb->sk->sk_hash)
7019298a 2177 hash = skb->sk->sk_hash;
ec581f6a 2178 else
87fd308c 2179 hash = (__force u16) skb->protocol ^ skb->rxhash;
0a9627f2 2180 hash = jhash_1word(hash, hashrnd);
b6b2fed1 2181
a3d22a68 2182 return (u16) (((u64) hash * num_tx_queues) >> 32);
8f0f2223 2183}
a3d22a68 2184EXPORT_SYMBOL(__skb_tx_hash);
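
/*
 * The ((u64)hash * num_tx_queues) >> 32 step above maps a 32-bit hash
 * uniformly onto [0, num_tx_queues) without a divide: e.g. with 8 queues
 * and hash = 0x80000000, ((u64)0x80000000 * 8) >> 32 == 4. A minimal
 * standalone sketch of the same multiply-shift range reduction:
 */
static inline u16 example_scale_hash(u32 hash, u16 range)
{
	return (u16)(((u64)hash * range) >> 32);	/* result in [0, range) */
}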
8f0f2223 2185
ed04642f
ED
2186static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2187{
2188 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2189 if (net_ratelimit()) {
7a161ea9
ED
2190 pr_warning("%s selects TX queue %d, but "
2191 "real number of TX queues is %d\n",
2192 dev->name, queue_index, dev->real_num_tx_queues);
ed04642f
ED
2193 }
2194 return 0;
2195 }
2196 return queue_index;
2197}
2198
1d24eb48
TH
2199static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2200{
bf264145 2201#ifdef CONFIG_XPS
1d24eb48
TH
2202 struct xps_dev_maps *dev_maps;
2203 struct xps_map *map;
2204 int queue_index = -1;
2205
2206 rcu_read_lock();
2207 dev_maps = rcu_dereference(dev->xps_maps);
2208 if (dev_maps) {
2209 map = rcu_dereference(
2210 dev_maps->cpu_map[raw_smp_processor_id()]);
2211 if (map) {
2212 if (map->len == 1)
2213 queue_index = map->queues[0];
2214 else {
2215 u32 hash;
2216 if (skb->sk && skb->sk->sk_hash)
2217 hash = skb->sk->sk_hash;
2218 else
2219 hash = (__force u16) skb->protocol ^
2220 skb->rxhash;
2221 hash = jhash_1word(hash, hashrnd);
2222 queue_index = map->queues[
2223 ((u64)hash * map->len) >> 32];
2224 }
2225 if (unlikely(queue_index >= dev->real_num_tx_queues))
2226 queue_index = -1;
2227 }
2228 }
2229 rcu_read_unlock();
2230
2231 return queue_index;
2232#else
2233 return -1;
2234#endif
2235}
2236
e8a0464c
DM
2237static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2238 struct sk_buff *skb)
2239{
b0f77d0e 2240 int queue_index;
deabc772 2241 const struct net_device_ops *ops = dev->netdev_ops;
a4ee3ce3 2242
3853b584
TH
2243 if (dev->real_num_tx_queues == 1)
2244 queue_index = 0;
2245 else if (ops->ndo_select_queue) {
deabc772
HS
2246 queue_index = ops->ndo_select_queue(dev, skb);
2247 queue_index = dev_cap_txqueue(dev, queue_index);
2248 } else {
2249 struct sock *sk = skb->sk;
2250 queue_index = sk_tx_queue_get(sk);
a4ee3ce3 2251
3853b584
TH
2252 if (queue_index < 0 || skb->ooo_okay ||
2253 queue_index >= dev->real_num_tx_queues) {
2254 int old_index = queue_index;
fd2ea0a7 2255
1d24eb48
TH
2256 queue_index = get_xps_queue(dev, skb);
2257 if (queue_index < 0)
2258 queue_index = skb_tx_hash(dev, skb);
3853b584
TH
2259
2260 if (queue_index != old_index && sk) {
2261 struct dst_entry *dst =
2262 rcu_dereference_check(sk->sk_dst_cache, 1);
8728c544
ED
2263
2264 if (dst && skb_dst(skb) == dst)
2265 sk_tx_queue_set(sk, queue_index);
2266 }
a4ee3ce3
KK
2267 }
2268 }
eae792b7 2269
fd2ea0a7
DM
2270 skb_set_queue_mapping(skb, queue_index);
2271 return netdev_get_tx_queue(dev, queue_index);
e8a0464c
DM
2272}
2273
bbd8a0d3
KK
2274static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2275 struct net_device *dev,
2276 struct netdev_queue *txq)
2277{
2278 spinlock_t *root_lock = qdisc_lock(q);
79640a4c 2279 bool contended = qdisc_is_running(q);
bbd8a0d3
KK
2280 int rc;
2281
79640a4c
ED
2282 /*
2283 * Heuristic to force contended enqueues to serialize on a
2284 * separate lock before trying to get qdisc main lock.
2285 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2286 * and dequeue packets faster.
2287 */
2288 if (unlikely(contended))
2289 spin_lock(&q->busylock);
2290
bbd8a0d3
KK
2291 spin_lock(root_lock);
2292 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2293 kfree_skb(skb);
2294 rc = NET_XMIT_DROP;
2295 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2296 qdisc_run_begin(q)) {
bbd8a0d3
KK
2297 /*
2298 * This is a work-conserving queue; there are no old skbs
2299 * waiting to be sent out; and the qdisc is not running -
2300 * xmit the skb directly.
2301 */
7fee226a
ED
2302 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2303 skb_dst_force(skb);
bbd8a0d3 2304 __qdisc_update_bstats(q, skb->len);
79640a4c
ED
2305 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2306 if (unlikely(contended)) {
2307 spin_unlock(&q->busylock);
2308 contended = false;
2309 }
bbd8a0d3 2310 __qdisc_run(q);
79640a4c 2311 } else
bc135b23 2312 qdisc_run_end(q);
bbd8a0d3
KK
2313
2314 rc = NET_XMIT_SUCCESS;
2315 } else {
7fee226a 2316 skb_dst_force(skb);
bbd8a0d3 2317 rc = qdisc_enqueue_root(skb, q);
79640a4c
ED
2318 if (qdisc_run_begin(q)) {
2319 if (unlikely(contended)) {
2320 spin_unlock(&q->busylock);
2321 contended = false;
2322 }
2323 __qdisc_run(q);
2324 }
bbd8a0d3
KK
2325 }
2326 spin_unlock(root_lock);
79640a4c
ED
2327 if (unlikely(contended))
2328 spin_unlock(&q->busylock);
bbd8a0d3
KK
2329 return rc;
2330}
2331
745e20f1 2332static DEFINE_PER_CPU(int, xmit_recursion);
11a766ce 2333#define RECURSION_LIMIT 10
745e20f1 2334
d29f749e
DJ
2335/**
2336 * dev_queue_xmit - transmit a buffer
2337 * @skb: buffer to transmit
2338 *
2339 * Queue a buffer for transmission to a network device. The caller must
2340 * have set the device and priority and built the buffer before calling
2341 * this function. The function can be called from an interrupt.
2342 *
2343 * A negative errno code is returned on a failure. A success does not
2344 * guarantee the frame will be transmitted as it may be dropped due
2345 * to congestion or traffic shaping.
2346 *
2347 * -----------------------------------------------------------------------------------
2348 * I notice this method can also return errors from the queue disciplines,
2349 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2350 * be positive.
2351 *
2352 * Regardless of the return value, the skb is consumed, so it is currently
2353 * difficult to retry a send to this method. (You can bump the ref count
2354 * before sending to hold a reference for retry if you are careful.)
2355 *
2356 * When calling this method, interrupts MUST be enabled. This is because
2357 * the BH enable code must have IRQs enabled so that it will not deadlock.
2358 * --BLG
2359 */
1da177e4
LT
2360int dev_queue_xmit(struct sk_buff *skb)
2361{
2362 struct net_device *dev = skb->dev;
dc2b4847 2363 struct netdev_queue *txq;
1da177e4
LT
2364 struct Qdisc *q;
2365 int rc = -ENOMEM;
2366
4ec93edb
YH
2367 /* Disable soft irqs for various locks below. Also
2368 * stops preemption for RCU.
1da177e4 2369 */
4ec93edb 2370 rcu_read_lock_bh();
1da177e4 2371
eae792b7 2372 txq = dev_pick_tx(dev, skb);
a898def2 2373 q = rcu_dereference_bh(txq->qdisc);
37437bb2 2374
1da177e4 2375#ifdef CONFIG_NET_CLS_ACT
d1b19dff 2376 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4 2377#endif
cf66ba58 2378 trace_net_dev_queue(skb);
1da177e4 2379 if (q->enqueue) {
bbd8a0d3 2380 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 2381 goto out;
1da177e4
LT
2382 }
2383
2384 /* The device has no queue. Common case for software devices:
2385 loopback, all sorts of tunnels...
2386
932ff279
HX
2387 Really, it is unlikely that netif_tx_lock protection is necessary
2388 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
2389 counters.)
2390 However, it is possible that they rely on the protection
2391 we provide here.
2392
2393 Check this and take the lock anyway. It is not prone to deadlocks.
2394 Taking it for the noqueue qdisc is even simpler 8)
2395 */
2396 if (dev->flags & IFF_UP) {
2397 int cpu = smp_processor_id(); /* ok because BHs are off */
2398
c773e847 2399 if (txq->xmit_lock_owner != cpu) {
1da177e4 2400
745e20f1
ED
2401 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2402 goto recursion_alert;
2403
c773e847 2404 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 2405
fd2ea0a7 2406 if (!netif_tx_queue_stopped(txq)) {
745e20f1 2407 __this_cpu_inc(xmit_recursion);
572a9d7b 2408 rc = dev_hard_start_xmit(skb, dev, txq);
745e20f1 2409 __this_cpu_dec(xmit_recursion);
572a9d7b 2410 if (dev_xmit_complete(rc)) {
c773e847 2411 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2412 goto out;
2413 }
2414 }
c773e847 2415 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2416 if (net_ratelimit())
2417 printk(KERN_CRIT "Virtual device %s asks to "
2418 "queue packet!\n", dev->name);
2419 } else {
2420 /* Recursion is detected! It is possible,
745e20f1
ED
2421 * unfortunately
2422 */
2423recursion_alert:
1da177e4
LT
2424 if (net_ratelimit())
2425 printk(KERN_CRIT "Dead loop on virtual device "
2426 "%s, fix it urgently!\n", dev->name);
2427 }
2428 }
2429
2430 rc = -ENETDOWN;
d4828d85 2431 rcu_read_unlock_bh();
1da177e4 2432
1da177e4
LT
2433 kfree_skb(skb);
2434 return rc;
2435out:
d4828d85 2436 rcu_read_unlock_bh();
1da177e4
LT
2437 return rc;
2438}
d1b19dff 2439EXPORT_SYMBOL(dev_queue_xmit);
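
/*
 * Example (illustrative sketch, not part of dev.c): a minimal transmit of a
 * preformatted Ethernet frame. The caller sets skb->dev before handing the
 * skb to dev_queue_xmit(); per the comment above, the skb is consumed
 * whatever the return value. frame/len are assumed to hold a complete,
 * non-VLAN Ethernet frame.
 */
static int example_send_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = eth_hdr(skb)->h_proto;

	return dev_queue_xmit(skb);	/* consumes skb even on error */
}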
1da177e4
LT
2440
2441
2442/*=======================================================================
2443 Receiver routines
2444 =======================================================================*/
2445
6b2bedc3 2446int netdev_max_backlog __read_mostly = 1000;
3b098e2d 2447int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
2448int netdev_budget __read_mostly = 300;
2449int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 2450
eecfd7c4
ED
2451/* Called with irq disabled */
2452static inline void ____napi_schedule(struct softnet_data *sd,
2453 struct napi_struct *napi)
2454{
2455 list_add_tail(&napi->poll_list, &sd->poll_list);
2456 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2457}
2458
0a9627f2 2459/*
bfb564e7
KK
2460 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2461 * and src/dst port numbers. Returns a non-zero hash number on success
2462 * and 0 on failure.
0a9627f2 2463 */
bfb564e7 2464__u32 __skb_get_rxhash(struct sk_buff *skb)
0a9627f2 2465{
12fcdefb 2466 int nhoff, hash = 0, poff;
0a9627f2
TH
2467 struct ipv6hdr *ip6;
2468 struct iphdr *ip;
0a9627f2 2469 u8 ip_proto;
8c52d509
CG
2470 u32 addr1, addr2, ihl;
2471 union {
2472 u32 v32;
2473 u16 v16[2];
2474 } ports;
0a9627f2 2475
bfb564e7 2476 nhoff = skb_network_offset(skb);
0a9627f2
TH
2477
2478 switch (skb->protocol) {
2479 case __constant_htons(ETH_P_IP):
bfb564e7 2480 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
0a9627f2
TH
2481 goto done;
2482
1003489e 2483 ip = (struct iphdr *) (skb->data + nhoff);
dbe5775b
CG
2484 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2485 ip_proto = 0;
2486 else
2487 ip_proto = ip->protocol;
b249dcb8
ED
2488 addr1 = (__force u32) ip->saddr;
2489 addr2 = (__force u32) ip->daddr;
0a9627f2
TH
2490 ihl = ip->ihl;
2491 break;
2492 case __constant_htons(ETH_P_IPV6):
bfb564e7 2493 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
0a9627f2
TH
2494 goto done;
2495
1003489e 2496 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
0a9627f2 2497 ip_proto = ip6->nexthdr;
b249dcb8
ED
2498 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2499 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
0a9627f2
TH
2500 ihl = (40 >> 2);
2501 break;
2502 default:
2503 goto done;
2504 }
bfb564e7 2505
12fcdefb
CG
2506 ports.v32 = 0;
2507 poff = proto_ports_offset(ip_proto);
2508 if (poff >= 0) {
2509 nhoff += ihl * 4 + poff;
2510 if (pskb_may_pull(skb, nhoff + 4)) {
2511 ports.v32 = * (__force u32 *) (skb->data + nhoff);
8c52d509
CG
2512 if (ports.v16[1] < ports.v16[0])
2513 swap(ports.v16[0], ports.v16[1]);
b249dcb8 2514 }
0a9627f2
TH
2515 }
2516
b249dcb8
ED
2517 /* get a consistent hash (same value on both flow directions) */
2518 if (addr2 < addr1)
2519 swap(addr1, addr2);
0a9627f2 2520
bfb564e7
KK
2521 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2522 if (!hash)
2523 hash = 1;
2524
2525done:
2526 return hash;
2527}
2528EXPORT_SYMBOL(__skb_get_rxhash);
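
/*
 * The canonicalisation above (placing the smaller address and port first
 * before hashing) is what makes the rxhash direction-independent, so both
 * halves of a TCP/UDP flow land on the same value. A condensed sketch of
 * that property, reusing this file's hashrnd (the port packing here is
 * simplified relative to the union used above):
 */
static u32 example_symmetric_hash(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	u32 ports, hash;

	if (daddr < saddr)
		swap(saddr, daddr);
	if (dport < sport)
		swap(sport, dport);
	ports = ((u32)dport << 16) | sport;

	hash = jhash_3words(saddr, daddr, ports, hashrnd);
	return hash ? hash : 1;	/* 0 is reserved to mean "no hash" */
}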
2529
2530#ifdef CONFIG_RPS
2531
2532/* One global table that all flow-based protocols share. */
6e3f7faf 2533struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7
KK
2534EXPORT_SYMBOL(rps_sock_flow_table);
2535
2536/*
2537 * get_rps_cpu is called from netif_receive_skb and returns the target
2538 * CPU from the RPS map of the receiving queue for a given skb.
2539 * rcu_read_lock must be held on entry.
2540 */
2541static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2542 struct rps_dev_flow **rflowp)
2543{
2544 struct netdev_rx_queue *rxqueue;
6e3f7faf 2545 struct rps_map *map;
bfb564e7
KK
2546 struct rps_dev_flow_table *flow_table;
2547 struct rps_sock_flow_table *sock_flow_table;
2548 int cpu = -1;
2549 u16 tcpu;
2550
2551 if (skb_rx_queue_recorded(skb)) {
2552 u16 index = skb_get_rx_queue(skb);
62fe0b40
BH
2553 if (unlikely(index >= dev->real_num_rx_queues)) {
2554 WARN_ONCE(dev->real_num_rx_queues > 1,
2555 "%s received packet on queue %u, but number "
2556 "of RX queues is %u\n",
2557 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
2558 goto done;
2559 }
2560 rxqueue = dev->_rx + index;
2561 } else
2562 rxqueue = dev->_rx;
2563
6e3f7faf
ED
2564 map = rcu_dereference(rxqueue->rps_map);
2565 if (map) {
2566 if (map->len == 1) {
6febfca9
CG
2567 tcpu = map->cpus[0];
2568 if (cpu_online(tcpu))
2569 cpu = tcpu;
2570 goto done;
2571 }
6e3f7faf 2572 } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
bfb564e7 2573 goto done;
6febfca9 2574 }
bfb564e7 2575
2d47b459 2576 skb_reset_network_header(skb);
bfb564e7
KK
2577 if (!skb_get_rxhash(skb))
2578 goto done;
2579
fec5e652
TH
2580 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2581 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2582 if (flow_table && sock_flow_table) {
2583 u16 next_cpu;
2584 struct rps_dev_flow *rflow;
2585
2586 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2587 tcpu = rflow->cpu;
2588
2589 next_cpu = sock_flow_table->ents[skb->rxhash &
2590 sock_flow_table->mask];
2591
2592 /*
2593 * If the desired CPU (where last recvmsg was done) is
2594 * different from current CPU (one in the rx-queue flow
2595 * table entry), switch if one of the following holds:
2596 * - Current CPU is unset (equal to RPS_NO_CPU).
2597 * - Current CPU is offline.
2598 * - The current CPU's queue tail has advanced beyond the
2599 * last packet that was enqueued using this table entry.
2600 * This guarantees that all previous packets for the flow
2601 * have been dequeued, thus preserving in order delivery.
2602 */
2603 if (unlikely(tcpu != next_cpu) &&
2604 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2605 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2606 rflow->last_qtail)) >= 0)) {
2607 tcpu = rflow->cpu = next_cpu;
2608 if (tcpu != RPS_NO_CPU)
2609 rflow->last_qtail = per_cpu(softnet_data,
2610 tcpu).input_queue_head;
2611 }
2612 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2613 *rflowp = rflow;
2614 cpu = tcpu;
2615 goto done;
2616 }
2617 }
2618
0a9627f2 2619 if (map) {
fec5e652 2620 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
0a9627f2
TH
2621
2622 if (cpu_online(tcpu)) {
2623 cpu = tcpu;
2624 goto done;
2625 }
2626 }
2627
2628done:
0a9627f2
TH
2629 return cpu;
2630}
2631
0a9627f2 2632/* Called from hardirq (IPI) context */
e36fa2f7 2633static void rps_trigger_softirq(void *data)
0a9627f2 2634{
e36fa2f7
ED
2635 struct softnet_data *sd = data;
2636
eecfd7c4 2637 ____napi_schedule(sd, &sd->backlog);
dee42870 2638 sd->received_rps++;
0a9627f2 2639}
e36fa2f7 2640
fec5e652 2641#endif /* CONFIG_RPS */
0a9627f2 2642
e36fa2f7
ED
2643/*
2644 * Check if this softnet_data structure belongs to another CPU.
2645 * If yes, queue it to our IPI list and return 1;
2646 * if no, return 0.
2647 */
2648static int rps_ipi_queued(struct softnet_data *sd)
2649{
2650#ifdef CONFIG_RPS
2651 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2652
2653 if (sd != mysd) {
2654 sd->rps_ipi_next = mysd->rps_ipi_list;
2655 mysd->rps_ipi_list = sd;
2656
2657 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2658 return 1;
2659 }
2660#endif /* CONFIG_RPS */
2661 return 0;
2662}
2663
0a9627f2
TH
2664/*
2665 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2666 * queue (may be a remote CPU queue).
2667 */
fec5e652
TH
2668static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2669 unsigned int *qtail)
0a9627f2 2670{
e36fa2f7 2671 struct softnet_data *sd;
0a9627f2
TH
2672 unsigned long flags;
2673
e36fa2f7 2674 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
2675
2676 local_irq_save(flags);
0a9627f2 2677
e36fa2f7 2678 rps_lock(sd);
6e7676c1
CG
2679 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2680 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 2681enqueue:
e36fa2f7 2682 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 2683 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 2684 rps_unlock(sd);
152102c7 2685 local_irq_restore(flags);
0a9627f2
TH
2686 return NET_RX_SUCCESS;
2687 }
2688
ebda37c2
ED
2689 /* Schedule NAPI for backlog device
2690 * We can use non atomic operation since we own the queue lock
2691 */
2692 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 2693 if (!rps_ipi_queued(sd))
eecfd7c4 2694 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
2695 }
2696 goto enqueue;
2697 }
2698
dee42870 2699 sd->dropped++;
e36fa2f7 2700 rps_unlock(sd);
0a9627f2 2701
0a9627f2
TH
2702 local_irq_restore(flags);
2703
caf586e5 2704 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
2705 kfree_skb(skb);
2706 return NET_RX_DROP;
2707}
1da177e4 2708
1da177e4
LT
2709/**
2710 * netif_rx - post buffer to the network code
2711 * @skb: buffer to post
2712 *
2713 * This function receives a packet from a device driver and queues it for
2714 * the upper (protocol) levels to process. It always succeeds. The buffer
2715 * may be dropped during processing for congestion control or by the
2716 * protocol layers.
2717 *
2718 * return values:
2719 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
2720 * NET_RX_DROP (packet was dropped)
2721 *
2722 */
2723
2724int netif_rx(struct sk_buff *skb)
2725{
b0e28f1e 2726 int ret;
1da177e4
LT
2727
2728 /* if netpoll wants it, pretend we never saw it */
2729 if (netpoll_rx(skb))
2730 return NET_RX_DROP;
2731
3b098e2d
ED
2732 if (netdev_tstamp_prequeue)
2733 net_timestamp_check(skb);
1da177e4 2734
cf66ba58 2735 trace_netif_rx(skb);
df334545 2736#ifdef CONFIG_RPS
b0e28f1e 2737 {
fec5e652 2738 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
2739 int cpu;
2740
cece1945 2741 preempt_disable();
b0e28f1e 2742 rcu_read_lock();
fec5e652
TH
2743
2744 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
2745 if (cpu < 0)
2746 cpu = smp_processor_id();
fec5e652
TH
2747
2748 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2749
b0e28f1e 2750 rcu_read_unlock();
cece1945 2751 preempt_enable();
b0e28f1e 2752 }
1e94d72f 2753#else
fec5e652
TH
2754 {
2755 unsigned int qtail;
2756 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2757 put_cpu();
2758 }
1e94d72f 2759#endif
b0e28f1e 2760 return ret;
1da177e4 2761}
d1b19dff 2762EXPORT_SYMBOL(netif_rx);
1da177e4
LT
2763
2764int netif_rx_ni(struct sk_buff *skb)
2765{
2766 int err;
2767
2768 preempt_disable();
2769 err = netif_rx(skb);
2770 if (local_softirq_pending())
2771 do_softirq();
2772 preempt_enable();
2773
2774 return err;
2775}
1da177e4
LT
2776EXPORT_SYMBOL(netif_rx_ni);
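
/*
 * Example (illustrative sketch, not part of dev.c): a classic non-NAPI
 * receive path. The interrupt handler copies the frame out of the hardware,
 * classifies it with eth_type_trans() and posts it with netif_rx(); code
 * running in process context would use netif_rx_ni() instead.
 * example_hw_copy_frame() is hypothetical.
 */
static void example_rx_irq(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;		/* frame dropped */

	example_hw_copy_frame(dev, skb_put(skb, len));
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* enqueue to the per-CPU backlog */
}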
2777
1da177e4
LT
2778static void net_tx_action(struct softirq_action *h)
2779{
2780 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2781
2782 if (sd->completion_queue) {
2783 struct sk_buff *clist;
2784
2785 local_irq_disable();
2786 clist = sd->completion_queue;
2787 sd->completion_queue = NULL;
2788 local_irq_enable();
2789
2790 while (clist) {
2791 struct sk_buff *skb = clist;
2792 clist = clist->next;
2793
547b792c 2794 WARN_ON(atomic_read(&skb->users));
07dc22e7 2795 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
2796 __kfree_skb(skb);
2797 }
2798 }
2799
2800 if (sd->output_queue) {
37437bb2 2801 struct Qdisc *head;
1da177e4
LT
2802
2803 local_irq_disable();
2804 head = sd->output_queue;
2805 sd->output_queue = NULL;
a9cbd588 2806 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
2807 local_irq_enable();
2808
2809 while (head) {
37437bb2
DM
2810 struct Qdisc *q = head;
2811 spinlock_t *root_lock;
2812
1da177e4
LT
2813 head = head->next_sched;
2814
5fb66229 2815 root_lock = qdisc_lock(q);
37437bb2 2816 if (spin_trylock(root_lock)) {
def82a1d
JP
2817 smp_mb__before_clear_bit();
2818 clear_bit(__QDISC_STATE_SCHED,
2819 &q->state);
37437bb2
DM
2820 qdisc_run(q);
2821 spin_unlock(root_lock);
1da177e4 2822 } else {
195648bb 2823 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2824 &q->state)) {
195648bb 2825 __netif_reschedule(q);
e8a83e10
JP
2826 } else {
2827 smp_mb__before_clear_bit();
2828 clear_bit(__QDISC_STATE_SCHED,
2829 &q->state);
2830 }
1da177e4
LT
2831 }
2832 }
2833 }
2834}
2835
ab95bfe0
JP
2836#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2837 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
2838/* This hook is defined here for ATM LANE */
2839int (*br_fdb_test_addr_hook)(struct net_device *dev,
2840 unsigned char *addr) __read_mostly;
4fb019a0 2841EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 2842#endif
1da177e4 2843
1da177e4
LT
2844#ifdef CONFIG_NET_CLS_ACT
2845/* TODO: Maybe we should just force sch_ingress to be compiled in
2846 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions
2847 * (a compare and 2 extra stores) right now if we don't have it on
2848 * but do have CONFIG_NET_CLS_ACT.
4ec93edb 2849 * NOTE: This doesn't stop any functionality; if you don't have
1da177e4
LT
2850 * the ingress scheduler, you just can't add policies on ingress.
2851 *
2852 */
24824a09 2853static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
1da177e4 2854{
1da177e4 2855 struct net_device *dev = skb->dev;
f697c3e8 2856 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
2857 int result = TC_ACT_OK;
2858 struct Qdisc *q;
4ec93edb 2859
de384830
SH
2860 if (unlikely(MAX_RED_LOOP < ttl++)) {
2861 if (net_ratelimit())
2862 pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
2863 skb->skb_iif, dev->ifindex);
f697c3e8
HX
2864 return TC_ACT_SHOT;
2865 }
1da177e4 2866
f697c3e8
HX
2867 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2868 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2869
83874000 2870 q = rxq->qdisc;
8d50b53d 2871 if (q != &noop_qdisc) {
83874000 2872 spin_lock(qdisc_lock(q));
a9312ae8
DM
2873 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2874 result = qdisc_enqueue_root(skb, q);
83874000
DM
2875 spin_unlock(qdisc_lock(q));
2876 }
f697c3e8
HX
2877
2878 return result;
2879}
86e65da9 2880
f697c3e8
HX
2881static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2882 struct packet_type **pt_prev,
2883 int *ret, struct net_device *orig_dev)
2884{
24824a09
ED
2885 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
2886
2887 if (!rxq || rxq->qdisc == &noop_qdisc)
f697c3e8 2888 goto out;
1da177e4 2889
f697c3e8
HX
2890 if (*pt_prev) {
2891 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2892 *pt_prev = NULL;
1da177e4
LT
2893 }
2894
24824a09 2895 switch (ing_filter(skb, rxq)) {
f697c3e8
HX
2896 case TC_ACT_SHOT:
2897 case TC_ACT_STOLEN:
2898 kfree_skb(skb);
2899 return NULL;
2900 }
2901
2902out:
2903 skb->tc_verd = 0;
2904 return skb;
1da177e4
LT
2905}
2906#endif
2907
ab95bfe0
JP
2908/**
2909 * netdev_rx_handler_register - register receive handler
2910 * @dev: device to register a handler for
2911 * @rx_handler: receive handler to register
93e2c32b 2912 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0
JP
2913 *
2914 * Register a receive handler for a device. This handler will then be
2915 * called from __netif_receive_skb. A negative errno code is returned
2916 * on a failure.
2917 *
2918 * The caller must hold the rtnl_mutex.
2919 */
2920int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
2921 rx_handler_func_t *rx_handler,
2922 void *rx_handler_data)
ab95bfe0
JP
2923{
2924 ASSERT_RTNL();
2925
2926 if (dev->rx_handler)
2927 return -EBUSY;
2928
93e2c32b 2929 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
2930 rcu_assign_pointer(dev->rx_handler, rx_handler);
2931
2932 return 0;
2933}
2934EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
2935
2936/**
2937 * netdev_rx_handler_unregister - unregister receive handler
2938 * @dev: device to unregister a handler from
2939 *
2940 * Unregister a receive handler from a device.
2941 *
2942 * The caller must hold the rtnl_mutex.
2943 */
2944void netdev_rx_handler_unregister(struct net_device *dev)
2945{
2946
2947 ASSERT_RTNL();
2948 rcu_assign_pointer(dev->rx_handler, NULL);
93e2c32b 2949 rcu_assign_pointer(dev->rx_handler_data, NULL);
ab95bfe0
JP
2950}
2951EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
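
/*
 * Example (illustrative sketch, not part of dev.c): attaching a receive
 * handler the way bridge/macvlan do. Registration requires the RTNL lock
 * and fails with -EBUSY if a handler is already installed. The handler
 * below is hypothetical; returning NULL from it tells __netif_receive_skb()
 * that the skb was consumed, while returning the skb resumes normal delivery.
 */
static struct sk_buff *example_rx_handler(struct sk_buff *skb)
{
	/* inspect or steal the skb here */
	return skb;	/* continue normal receive processing */
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
	rtnl_unlock();
	return err;
}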
2952
acbbc071
ED
2953static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2954 struct net_device *master)
2955{
2956 if (skb->pkt_type == PACKET_HOST) {
2957 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2958
2959 memcpy(dest, master->dev_addr, ETH_ALEN);
2960 }
2961}
2962
2963/* On bonding slaves other than the currently active slave, suppress
2964 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2965 * ARP on active-backup slaves with arp_validate enabled.
2966 */
2967int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2968{
2969 struct net_device *dev = skb->dev;
2970
2971 if (master->priv_flags & IFF_MASTER_ARPMON)
2972 dev->last_rx = jiffies;
2973
f350a0a8
JP
2974 if ((master->priv_flags & IFF_MASTER_ALB) &&
2975 (master->priv_flags & IFF_BRIDGE_PORT)) {
acbbc071
ED
2976 /* Do the address unmangling. The local destination address
2977 * will always be the one the master has. This provides the right
2978 * functionality in a bridge.
2979 */
2980 skb_bond_set_mac_by_master(skb, master);
2981 }
2982
2983 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2984 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2985 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2986 return 0;
2987
2988 if (master->priv_flags & IFF_MASTER_ALB) {
2989 if (skb->pkt_type != PACKET_BROADCAST &&
2990 skb->pkt_type != PACKET_MULTICAST)
2991 return 0;
2992 }
2993 if (master->priv_flags & IFF_MASTER_8023AD &&
2994 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2995 return 0;
2996
2997 return 1;
2998 }
2999 return 0;
3000}
3001EXPORT_SYMBOL(__skb_bond_should_drop);
3002
10f744d2 3003static int __netif_receive_skb(struct sk_buff *skb)
1da177e4
LT
3004{
3005 struct packet_type *ptype, *pt_prev;
ab95bfe0 3006 rx_handler_func_t *rx_handler;
f2ccd8fa 3007 struct net_device *orig_dev;
0641e4fb 3008 struct net_device *master;
0d7a3681 3009 struct net_device *null_or_orig;
2df4a0fa 3010 struct net_device *orig_or_bond;
1da177e4 3011 int ret = NET_RX_DROP;
252e3346 3012 __be16 type;
1da177e4 3013
3b098e2d
ED
3014 if (!netdev_tstamp_prequeue)
3015 net_timestamp_check(skb);
81bbb3d4 3016
cf66ba58 3017 trace_netif_receive_skb(skb);
9b22ea56 3018
1da177e4 3019 /* if we've gotten here through NAPI, check netpoll */
bea3348e 3020 if (netpoll_receive_skb(skb))
1da177e4
LT
3021 return NET_RX_DROP;
3022
8964be4a
ED
3023 if (!skb->skb_iif)
3024 skb->skb_iif = skb->dev->ifindex;
86e65da9 3025
597a264b
JF
3026 /*
3027 * bonding note: skbs received on inactive slaves should only
3028 * be delivered to pkt handlers that are exact matches. Also
3029 * the deliver_no_wcard flag will be set. If packet handlers
3030 * are sensitive to duplicate packets, these skbs will need to
3701e513 3031 * be dropped at the handler.
597a264b 3032 */
0d7a3681 3033 null_or_orig = NULL;
cc9bd5ce 3034 orig_dev = skb->dev;
0641e4fb 3035 master = ACCESS_ONCE(orig_dev->master);
597a264b
JF
3036 if (skb->deliver_no_wcard)
3037 null_or_orig = orig_dev;
3038 else if (master) {
3039 if (skb_bond_should_drop(skb, master)) {
3040 skb->deliver_no_wcard = 1;
0d7a3681 3041 null_or_orig = orig_dev; /* deliver only exact match */
597a264b 3042 } else
0641e4fb 3043 skb->dev = master;
cc9bd5ce 3044 }
8f903c70 3045
27f39c73 3046 __this_cpu_inc(softnet_data.processed);
c1d2bbe1 3047 skb_reset_network_header(skb);
badff6d0 3048 skb_reset_transport_header(skb);
b0e380b1 3049 skb->mac_len = skb->network_header - skb->mac_header;
1da177e4
LT
3050
3051 pt_prev = NULL;
3052
3053 rcu_read_lock();
3054
3055#ifdef CONFIG_NET_CLS_ACT
3056 if (skb->tc_verd & TC_NCLS) {
3057 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3058 goto ncls;
3059 }
3060#endif
3061
3062 list_for_each_entry_rcu(ptype, &ptype_all, list) {
f982307f
JE
3063 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
3064 ptype->dev == orig_dev) {
4ec93edb 3065 if (pt_prev)
f2ccd8fa 3066 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
3067 pt_prev = ptype;
3068 }
3069 }
3070
3071#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
3072 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3073 if (!skb)
1da177e4 3074 goto out;
1da177e4
LT
3075ncls:
3076#endif
3077
ab95bfe0
JP
3078 /* Handle special case of bridge or macvlan */
3079 rx_handler = rcu_dereference(skb->dev->rx_handler);
3080 if (rx_handler) {
3081 if (pt_prev) {
3082 ret = deliver_skb(skb, pt_prev, orig_dev);
3083 pt_prev = NULL;
3084 }
3085 skb = rx_handler(skb);
3086 if (!skb)
3087 goto out;
3088 }
1da177e4 3089
3701e513
JG
3090 if (vlan_tx_tag_present(skb)) {
3091 if (pt_prev) {
3092 ret = deliver_skb(skb, pt_prev, orig_dev);
3093 pt_prev = NULL;
3094 }
3095 if (vlan_hwaccel_do_receive(&skb)) {
3096 ret = __netif_receive_skb(skb);
3097 goto out;
3098 } else if (unlikely(!skb))
3099 goto out;
3100 }
3101
1f3c8804
AG
3102 /*
3103 * Make sure frames received on VLAN interfaces stacked on
3104 * bonding interfaces still make their way to any base bonding
3105 * device that may have registered for a specific ptype. The
3106 * handler may have to adjust skb->dev and orig_dev.
1f3c8804 3107 */
2df4a0fa 3108 orig_or_bond = orig_dev;
1f3c8804
AG
3109 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
3110 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2df4a0fa 3111 orig_or_bond = vlan_dev_real_dev(skb->dev);
1f3c8804
AG
3112 }
3113
1da177e4 3114 type = skb->protocol;
82d8a867
PE
3115 list_for_each_entry_rcu(ptype,
3116 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1f3c8804 3117 if (ptype->type == type && (ptype->dev == null_or_orig ||
ca8d9ea3 3118 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2df4a0fa 3119 ptype->dev == orig_or_bond)) {
4ec93edb 3120 if (pt_prev)
f2ccd8fa 3121 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
3122 pt_prev = ptype;
3123 }
3124 }
3125
3126 if (pt_prev) {
f2ccd8fa 3127 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3128 } else {
caf586e5 3129 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
3130 kfree_skb(skb);
3131 /* Jamal, now you will not be able to escape explaining
3132 * to me how you were going to use this. :-)
3133 */
3134 ret = NET_RX_DROP;
3135 }
3136
3137out:
3138 rcu_read_unlock();
3139 return ret;
3140}
0a9627f2
TH
3141
3142/**
3143 * netif_receive_skb - process receive buffer from network
3144 * @skb: buffer to process
3145 *
3146 * netif_receive_skb() is the main receive data processing function.
3147 * It always succeeds. The buffer may be dropped during processing
3148 * for congestion control or by the protocol layers.
3149 *
3150 * This function may only be called from softirq context and interrupts
3151 * should be enabled.
3152 *
3153 * Return values (usually ignored):
3154 * NET_RX_SUCCESS: no congestion
3155 * NET_RX_DROP: packet was dropped
3156 */
3157int netif_receive_skb(struct sk_buff *skb)
3158{
3b098e2d
ED
3159 if (netdev_tstamp_prequeue)
3160 net_timestamp_check(skb);
3161
c1f19b51
RC
3162 if (skb_defer_rx_timestamp(skb))
3163 return NET_RX_SUCCESS;
3164
df334545 3165#ifdef CONFIG_RPS
3b098e2d
ED
3166 {
3167 struct rps_dev_flow voidflow, *rflow = &voidflow;
3168 int cpu, ret;
fec5e652 3169
3b098e2d
ED
3170 rcu_read_lock();
3171
3172 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3173
3b098e2d
ED
3174 if (cpu >= 0) {
3175 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3176 rcu_read_unlock();
3177 } else {
3178 rcu_read_unlock();
3179 ret = __netif_receive_skb(skb);
3180 }
0a9627f2 3181
3b098e2d 3182 return ret;
fec5e652 3183 }
1e94d72f
TH
3184#else
3185 return __netif_receive_skb(skb);
3186#endif
0a9627f2 3187}
d1b19dff 3188EXPORT_SYMBOL(netif_receive_skb);
1da177e4 3189
88751275
ED
3190/* Network device is going away, flush any packets still pending
3191 * Called with irqs disabled.
3192 */
152102c7 3193static void flush_backlog(void *arg)
6e583ce5 3194{
152102c7 3195 struct net_device *dev = arg;
e36fa2f7 3196 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6e583ce5
SH
3197 struct sk_buff *skb, *tmp;
3198
e36fa2f7 3199 rps_lock(sd);
6e7676c1 3200 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 3201 if (skb->dev == dev) {
e36fa2f7 3202 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 3203 kfree_skb(skb);
76cc8b13 3204 input_queue_head_incr(sd);
6e583ce5 3205 }
6e7676c1 3206 }
e36fa2f7 3207 rps_unlock(sd);
6e7676c1
CG
3208
3209 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3210 if (skb->dev == dev) {
3211 __skb_unlink(skb, &sd->process_queue);
3212 kfree_skb(skb);
76cc8b13 3213 input_queue_head_incr(sd);
6e7676c1
CG
3214 }
3215 }
6e583ce5
SH
3216}
3217
d565b0a1
HX
3218static int napi_gro_complete(struct sk_buff *skb)
3219{
3220 struct packet_type *ptype;
3221 __be16 type = skb->protocol;
3222 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3223 int err = -ENOENT;
3224
fc59f9a3
HX
3225 if (NAPI_GRO_CB(skb)->count == 1) {
3226 skb_shinfo(skb)->gso_size = 0;
d565b0a1 3227 goto out;
fc59f9a3 3228 }
d565b0a1
HX
3229
3230 rcu_read_lock();
3231 list_for_each_entry_rcu(ptype, head, list) {
3232 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3233 continue;
3234
3235 err = ptype->gro_complete(skb);
3236 break;
3237 }
3238 rcu_read_unlock();
3239
3240 if (err) {
3241 WARN_ON(&ptype->list == head);
3242 kfree_skb(skb);
3243 return NET_RX_SUCCESS;
3244 }
3245
3246out:
d565b0a1
HX
3247 return netif_receive_skb(skb);
3248}
3249
86cac58b 3250inline void napi_gro_flush(struct napi_struct *napi)
d565b0a1
HX
3251{
3252 struct sk_buff *skb, *next;
3253
3254 for (skb = napi->gro_list; skb; skb = next) {
3255 next = skb->next;
3256 skb->next = NULL;
3257 napi_gro_complete(skb);
3258 }
3259
4ae5544f 3260 napi->gro_count = 0;
d565b0a1
HX
3261 napi->gro_list = NULL;
3262}
86cac58b 3263EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 3264
5b252f0c 3265enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
3266{
3267 struct sk_buff **pp = NULL;
3268 struct packet_type *ptype;
3269 __be16 type = skb->protocol;
3270 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
0da2afd5 3271 int same_flow;
d565b0a1 3272 int mac_len;
5b252f0c 3273 enum gro_result ret;
d565b0a1 3274
ce9e76c8 3275 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
d565b0a1
HX
3276 goto normal;
3277
21dc3301 3278 if (skb_is_gso(skb) || skb_has_frag_list(skb))
f17f5c91
HX
3279 goto normal;
3280
d565b0a1
HX
3281 rcu_read_lock();
3282 list_for_each_entry_rcu(ptype, head, list) {
d565b0a1
HX
3283 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3284 continue;
3285
86911732 3286 skb_set_network_header(skb, skb_gro_offset(skb));
d565b0a1
HX
3287 mac_len = skb->network_header - skb->mac_header;
3288 skb->mac_len = mac_len;
3289 NAPI_GRO_CB(skb)->same_flow = 0;
3290 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3291 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3292
d565b0a1
HX
3293 pp = ptype->gro_receive(&napi->gro_list, skb);
3294 break;
3295 }
3296 rcu_read_unlock();
3297
3298 if (&ptype->list == head)
3299 goto normal;
3300
0da2afd5 3301 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3302 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3303
d565b0a1
HX
3304 if (pp) {
3305 struct sk_buff *nskb = *pp;
3306
3307 *pp = nskb->next;
3308 nskb->next = NULL;
3309 napi_gro_complete(nskb);
4ae5544f 3310 napi->gro_count--;
d565b0a1
HX
3311 }
3312
0da2afd5 3313 if (same_flow)
d565b0a1
HX
3314 goto ok;
3315
4ae5544f 3316 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
d565b0a1 3317 goto normal;
d565b0a1 3318
4ae5544f 3319 napi->gro_count++;
d565b0a1 3320 NAPI_GRO_CB(skb)->count = 1;
86911732 3321 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
3322 skb->next = napi->gro_list;
3323 napi->gro_list = skb;
5d0d9be8 3324 ret = GRO_HELD;
d565b0a1 3325
ad0f9904 3326pull:
cb18978c
HX
3327 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3328 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3329
3330 BUG_ON(skb->end - skb->tail < grow);
3331
3332 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3333
3334 skb->tail += grow;
3335 skb->data_len -= grow;
3336
3337 skb_shinfo(skb)->frags[0].page_offset += grow;
3338 skb_shinfo(skb)->frags[0].size -= grow;
3339
3340 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3341 put_page(skb_shinfo(skb)->frags[0].page);
3342 memmove(skb_shinfo(skb)->frags,
3343 skb_shinfo(skb)->frags + 1,
e5093aec 3344 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
cb18978c 3345 }
ad0f9904
HX
3346 }
3347
d565b0a1 3348ok:
5d0d9be8 3349 return ret;
d565b0a1
HX
3350
3351normal:
ad0f9904
HX
3352 ret = GRO_NORMAL;
3353 goto pull;
5d38a079 3354}
96e93eab
HX
3355EXPORT_SYMBOL(dev_gro_receive);
3356
40d0802b 3357static inline gro_result_t
5b252f0c 3358__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
96e93eab
HX
3359{
3360 struct sk_buff *p;
3361
3362 for (p = napi->gro_list; p; p = p->next) {
40d0802b
ED
3363 unsigned long diffs;
3364
3365 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3701e513 3366 diffs |= p->vlan_tci ^ skb->vlan_tci;
40d0802b 3367 diffs |= compare_ether_header(skb_mac_header(p),
f64f9e71 3368 skb_gro_mac_header(skb));
40d0802b 3369 NAPI_GRO_CB(p)->same_flow = !diffs;
96e93eab
HX
3370 NAPI_GRO_CB(p)->flush = 0;
3371 }
3372
3373 return dev_gro_receive(napi, skb);
3374}
5d38a079 3375
c7c4b3b6 3376gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3377{
5d0d9be8
HX
3378 switch (ret) {
3379 case GRO_NORMAL:
c7c4b3b6
BH
3380 if (netif_receive_skb(skb))
3381 ret = GRO_DROP;
3382 break;
5d38a079 3383
5d0d9be8 3384 case GRO_DROP:
5d0d9be8 3385 case GRO_MERGED_FREE:
5d38a079
HX
3386 kfree_skb(skb);
3387 break;
5b252f0c
BH
3388
3389 case GRO_HELD:
3390 case GRO_MERGED:
3391 break;
5d38a079
HX
3392 }
3393
c7c4b3b6 3394 return ret;
5d0d9be8
HX
3395}
3396EXPORT_SYMBOL(napi_skb_finish);
3397
78a478d0
HX
3398void skb_gro_reset_offset(struct sk_buff *skb)
3399{
3400 NAPI_GRO_CB(skb)->data_offset = 0;
3401 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3402 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3403
78d3fd0b 3404 if (skb->mac_header == skb->tail &&
7489594c 3405 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
78a478d0
HX
3406 NAPI_GRO_CB(skb)->frag0 =
3407 page_address(skb_shinfo(skb)->frags[0].page) +
3408 skb_shinfo(skb)->frags[0].page_offset;
7489594c
HX
3409 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3410 }
78a478d0
HX
3411}
3412EXPORT_SYMBOL(skb_gro_reset_offset);
3413
c7c4b3b6 3414gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3415{
86911732
HX
3416 skb_gro_reset_offset(skb);
3417
5d0d9be8 3418 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
d565b0a1
HX
3419}
3420EXPORT_SYMBOL(napi_gro_receive);
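
/*
 * Example (illustrative sketch, not part of dev.c): inside a NAPI poll
 * routine, a GRO-capable driver hands each received skb to
 * napi_gro_receive() instead of netif_receive_skb(); held/merged segments
 * are flushed to the stack when napi_complete() calls napi_gro_flush().
 * The example_adapter structure and example_rx_next() are hypothetical.
 */
static int example_rx_clean(struct example_adapter *adap, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_rx_next(adap)) != NULL) {
		skb->protocol = eth_type_trans(skb, adap->netdev);
		napi_gro_receive(&adap->napi, skb);
		done++;
	}
	return done;
}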
3421
d0c2b0d2 3422static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 3423{
96e93eab
HX
3424 __skb_pull(skb, skb_headlen(skb));
3425 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3701e513 3426 skb->vlan_tci = 0;
96e93eab
HX
3427
3428 napi->skb = skb;
3429}
96e93eab 3430
76620aaf 3431struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3432{
5d38a079 3433 struct sk_buff *skb = napi->skb;
5d38a079
HX
3434
3435 if (!skb) {
89d71a66
ED
3436 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3437 if (skb)
3438 napi->skb = skb;
80595d59 3439 }
96e93eab
HX
3440 return skb;
3441}
76620aaf 3442EXPORT_SYMBOL(napi_get_frags);
96e93eab 3443
c7c4b3b6
BH
3444gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3445 gro_result_t ret)
96e93eab 3446{
5d0d9be8
HX
3447 switch (ret) {
3448 case GRO_NORMAL:
86911732 3449 case GRO_HELD:
e76b69cc 3450 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3451
c7c4b3b6
BH
3452 if (ret == GRO_HELD)
3453 skb_gro_pull(skb, -ETH_HLEN);
3454 else if (netif_receive_skb(skb))
3455 ret = GRO_DROP;
86911732 3456 break;
5d38a079 3457
5d0d9be8 3458 case GRO_DROP:
5d0d9be8
HX
3459 case GRO_MERGED_FREE:
3460 napi_reuse_skb(napi, skb);
3461 break;
5b252f0c
BH
3462
3463 case GRO_MERGED:
3464 break;
5d0d9be8 3465 }
5d38a079 3466
c7c4b3b6 3467 return ret;
5d38a079 3468}
5d0d9be8
HX
3469EXPORT_SYMBOL(napi_frags_finish);
3470
76620aaf
HX
3471struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3472{
3473 struct sk_buff *skb = napi->skb;
3474 struct ethhdr *eth;
a5b1cf28
HX
3475 unsigned int hlen;
3476 unsigned int off;
76620aaf
HX
3477
3478 napi->skb = NULL;
3479
3480 skb_reset_mac_header(skb);
3481 skb_gro_reset_offset(skb);
3482
a5b1cf28
HX
3483 off = skb_gro_offset(skb);
3484 hlen = off + sizeof(*eth);
3485 eth = skb_gro_header_fast(skb, off);
3486 if (skb_gro_header_hard(skb, hlen)) {
3487 eth = skb_gro_header_slow(skb, hlen, off);
3488 if (unlikely(!eth)) {
3489 napi_reuse_skb(napi, skb);
3490 skb = NULL;
3491 goto out;
3492 }
76620aaf
HX
3493 }
3494
3495 skb_gro_pull(skb, sizeof(*eth));
3496
3497 /*
3498 * This works because the only protocols we care about don't require
3499 * special handling. We'll fix it up properly at the end.
3500 */
3501 skb->protocol = eth->h_proto;
3502
3503out:
3504 return skb;
3505}
3506EXPORT_SYMBOL(napi_frags_skb);
3507
c7c4b3b6 3508gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3509{
76620aaf 3510 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
3511
3512 if (!skb)
c7c4b3b6 3513 return GRO_DROP;
5d0d9be8
HX
3514
3515 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3516}
5d38a079
HX
3517EXPORT_SYMBOL(napi_gro_frags);
3518
e326bed2
ED
3519/*
3520 * net_rps_action sends any pending IPIs for RPS.
3521 * Note: called with local irq disabled, but exits with local irq enabled.
3522 */
3523static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3524{
3525#ifdef CONFIG_RPS
3526 struct softnet_data *remsd = sd->rps_ipi_list;
3527
3528 if (remsd) {
3529 sd->rps_ipi_list = NULL;
3530
3531 local_irq_enable();
3532
3533 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3534 while (remsd) {
3535 struct softnet_data *next = remsd->rps_ipi_next;
3536
3537 if (cpu_online(remsd->cpu))
3538 __smp_call_function_single(remsd->cpu,
3539 &remsd->csd, 0);
3540 remsd = next;
3541 }
3542 } else
3543#endif
3544 local_irq_enable();
3545}
3546
bea3348e 3547static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
3548{
3549 int work = 0;
eecfd7c4 3550 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 3551
e326bed2
ED
3552#ifdef CONFIG_RPS
3553 /* Check if we have pending IPIs; it's better to send them now
3554 * than to wait for net_rx_action() to end.
3555 */
3556 if (sd->rps_ipi_list) {
3557 local_irq_disable();
3558 net_rps_action_and_irq_enable(sd);
3559 }
3560#endif
bea3348e 3561 napi->weight = weight_p;
6e7676c1
CG
3562 local_irq_disable();
3563 while (work < quota) {
1da177e4 3564 struct sk_buff *skb;
6e7676c1
CG
3565 unsigned int qlen;
3566
3567 while ((skb = __skb_dequeue(&sd->process_queue))) {
3568 local_irq_enable();
3569 __netif_receive_skb(skb);
6e7676c1 3570 local_irq_disable();
76cc8b13
TH
3571 input_queue_head_incr(sd);
3572 if (++work >= quota) {
3573 local_irq_enable();
3574 return work;
3575 }
6e7676c1 3576 }
1da177e4 3577
e36fa2f7 3578 rps_lock(sd);
6e7676c1 3579 qlen = skb_queue_len(&sd->input_pkt_queue);
76cc8b13 3580 if (qlen)
6e7676c1
CG
3581 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3582 &sd->process_queue);
76cc8b13 3583
6e7676c1 3584 if (qlen < quota - work) {
eecfd7c4
ED
3585 /*
3586 * Inline a custom version of __napi_complete().
3587 * Only the current cpu owns and manipulates this napi,
3588 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3589 * We can use a plain write instead of clear_bit(),
3590 * and we don't need an smp_mb() memory barrier.
3591 */
3592 list_del(&napi->poll_list);
3593 napi->state = 0;
3594
6e7676c1 3595 quota = work + qlen;
bea3348e 3596 }
e36fa2f7 3597 rps_unlock(sd);
6e7676c1
CG
3598 }
3599 local_irq_enable();
1da177e4 3600
bea3348e
SH
3601 return work;
3602}
1da177e4 3603
bea3348e
SH
3604/**
3605 * __napi_schedule - schedule for receive
c4ea43c5 3606 * @n: entry to schedule
bea3348e
SH
3607 *
3608 * The entry's receive function will be scheduled to run
3609 */
b5606c2d 3610void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3611{
3612 unsigned long flags;
1da177e4 3613
bea3348e 3614 local_irq_save(flags);
eecfd7c4 3615 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 3616 local_irq_restore(flags);
1da177e4 3617}
bea3348e
SH
3618EXPORT_SYMBOL(__napi_schedule);
3619
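/*
 * Example (editorial sketch): the canonical interrupt-handler pairing
 * for __napi_schedule(). my_dev and my_mask_irqs() are hypothetical;
 * napi_schedule_prep()/__napi_schedule() are the real API.
 */
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
        struct my_dev *priv = dev_id;

        if (napi_schedule_prep(&priv->napi)) {
                my_mask_irqs(priv);             /* quiesce the device */
                __napi_schedule(&priv->napi);   /* poll runs from softirq */
        }
        return IRQ_HANDLED;
}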
d565b0a1
HX
3620void __napi_complete(struct napi_struct *n)
3621{
3622 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3623 BUG_ON(n->gro_list);
3624
3625 list_del(&n->poll_list);
3626 smp_mb__before_clear_bit();
3627 clear_bit(NAPI_STATE_SCHED, &n->state);
3628}
3629EXPORT_SYMBOL(__napi_complete);
3630
3631void napi_complete(struct napi_struct *n)
3632{
3633 unsigned long flags;
3634
3635 /*
3636 * Don't let napi dequeue from the cpu poll list
3637 * just in case it's running on a different cpu.
3638 */
3639 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3640 return;
3641
3642 napi_gro_flush(n);
3643 local_irq_save(flags);
3644 __napi_complete(n);
3645 local_irq_restore(flags);
3646}
3647EXPORT_SYMBOL(napi_complete);
3648
3649void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3650 int (*poll)(struct napi_struct *, int), int weight)
3651{
3652 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3653 napi->gro_count = 0;
d565b0a1 3654 napi->gro_list = NULL;
5d38a079 3655 napi->skb = NULL;
d565b0a1
HX
3656 napi->poll = poll;
3657 napi->weight = weight;
3658 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3659 napi->dev = dev;
5d38a079 3660#ifdef CONFIG_NETPOLL
d565b0a1
HX
3661 spin_lock_init(&napi->poll_lock);
3662 napi->poll_owner = -1;
3663#endif
3664 set_bit(NAPI_STATE_SCHED, &napi->state);
3665}
3666EXPORT_SYMBOL(netif_napi_add);
3667
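/*
 * Example (editorial sketch): the driver side of the NAPI contract set
 * up by netif_napi_add(). my_priv, my_clean_rx() and my_unmask_irqs()
 * are hypothetical; a weight of 64 is the common choice.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = my_clean_rx(priv, budget);

        if (work_done < budget) {       /* ring drained: stop polling */
                napi_complete(napi);
                my_unmask_irqs(priv);
        }
        return work_done;
}

/* at probe time: netif_napi_add(netdev, &priv->napi, my_poll, 64); */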
3668void netif_napi_del(struct napi_struct *napi)
3669{
3670 struct sk_buff *skb, *next;
3671
d7b06636 3672 list_del_init(&napi->dev_list);
76620aaf 3673 napi_free_frags(napi);
d565b0a1
HX
3674
3675 for (skb = napi->gro_list; skb; skb = next) {
3676 next = skb->next;
3677 skb->next = NULL;
3678 kfree_skb(skb);
3679 }
3680
3681 napi->gro_list = NULL;
4ae5544f 3682 napi->gro_count = 0;
d565b0a1
HX
3683}
3684EXPORT_SYMBOL(netif_napi_del);
3685
1da177e4
LT
3686static void net_rx_action(struct softirq_action *h)
3687{
e326bed2 3688 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 3689 unsigned long time_limit = jiffies + 2;
51b0bded 3690 int budget = netdev_budget;
53fb95d3
MM
3691 void *have;
3692
1da177e4
LT
3693 local_irq_disable();
3694
e326bed2 3695 while (!list_empty(&sd->poll_list)) {
bea3348e
SH
3696 struct napi_struct *n;
3697 int work, weight;
1da177e4 3698
bea3348e 3699 /* If the softirq window is exhausted then punt.
24f8b238
SH
3700 * Allow this to run for 2 jiffies, which allows
3701 * an average latency of 1.5/HZ.
bea3348e 3702 */
24f8b238 3703 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
1da177e4
LT
3704 goto softnet_break;
3705
3706 local_irq_enable();
3707
bea3348e
SH
3708 /* Even though interrupts have been re-enabled, this
3709 * access is safe because interrupts can only add new
3710 * entries to the tail of this list, and only ->poll()
3711 * calls can remove this head entry from the list.
3712 */
e326bed2 3713 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 3714
bea3348e
SH
3715 have = netpoll_poll_lock(n);
3716
3717 weight = n->weight;
3718
0a7606c1
DM
3719 /* This NAPI_STATE_SCHED test is for avoiding a race
3720 * with netpoll's poll_napi(). Only the entity which
3721 * obtains the lock and sees NAPI_STATE_SCHED set will
3722 * actually make the ->poll() call. Therefore we avoid
3723 * accidentally calling ->poll() when NAPI is not scheduled.
3724 */
3725 work = 0;
4ea7e386 3726 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3727 work = n->poll(n, weight);
4ea7e386
NH
3728 trace_napi_poll(n);
3729 }
bea3348e
SH
3730
3731 WARN_ON_ONCE(work > weight);
3732
3733 budget -= work;
3734
3735 local_irq_disable();
3736
3737 /* Drivers must not modify the NAPI state if they
3738 * consume the entire weight. In such cases this code
3739 * still "owns" the NAPI instance and therefore can
3740 * move the instance around on the list at-will.
3741 */
fed17f30 3742 if (unlikely(work == weight)) {
ff780cd8
HX
3743 if (unlikely(napi_disable_pending(n))) {
3744 local_irq_enable();
3745 napi_complete(n);
3746 local_irq_disable();
3747 } else
e326bed2 3748 list_move_tail(&n->poll_list, &sd->poll_list);
fed17f30 3749 }
bea3348e
SH
3750
3751 netpoll_poll_unlock(have);
1da177e4
LT
3752 }
3753out:
e326bed2 3754 net_rps_action_and_irq_enable(sd);
0a9627f2 3755
db217334
CL
3756#ifdef CONFIG_NET_DMA
3757 /*
3758 * There may not be any more sk_buffs coming right now, so push
3759 * any pending DMA copies to hardware
3760 */
2ba05622 3761 dma_issue_pending_all();
db217334 3762#endif
bea3348e 3763
1da177e4
LT
3764 return;
3765
3766softnet_break:
dee42870 3767 sd->time_squeeze++;
1da177e4
LT
3768 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3769 goto out;
3770}
3771
d1b19dff 3772static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3773
3774/**
3775 * register_gifconf - register a SIOCGIFCONF handler
3776 * @family: Address family
3777 * @gifconf: Function handler
3778 *
3779 * Register protocol dependent address dumping routines. The handler
3780 * that is passed must not be freed or reused until it has been replaced
3781 * by another handler.
3782 */
d1b19dff 3783int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3784{
3785 if (family >= NPROTO)
3786 return -EINVAL;
3787 gifconf_list[family] = gifconf;
3788 return 0;
3789}
d1b19dff 3790EXPORT_SYMBOL(register_gifconf);
1da177e4
LT
3791
3792
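/*
 * Example (editorial sketch): how an address family plugs in its
 * SIOCGIFCONF helper, in the style of IPv4. my_gifconf and
 * my_family_init are hypothetical; a handler returns the bytes it
 * wrote, or the space it would need when called with a NULL buffer.
 */
static int my_gifconf(struct net_device *dev, char __user *bufptr, int len)
{
        return 0;       /* this device carries no addresses we report */
}

static int __init my_family_init(void)
{
        return register_gifconf(PF_INET, my_gifconf);
}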
3793/*
3794 * Map an interface index to its name (SIOCGIFNAME)
3795 */
3796
3797/*
3798 * We need this ioctl for efficient implementation of the
3799 * if_indextoname() function required by the IPv6 API. Without
3800 * it, we would have to search all the interfaces to find a
3801 * match. --pb
3802 */
3803
881d966b 3804static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3805{
3806 struct net_device *dev;
3807 struct ifreq ifr;
3808
3809 /*
3810 * Fetch the caller's info block.
3811 */
3812
3813 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3814 return -EFAULT;
3815
fb699dfd
ED
3816 rcu_read_lock();
3817 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3818 if (!dev) {
fb699dfd 3819 rcu_read_unlock();
1da177e4
LT
3820 return -ENODEV;
3821 }
3822
3823 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3824 rcu_read_unlock();
1da177e4
LT
3825
3826 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3827 return -EFAULT;
3828 return 0;
3829}
3830
3831/*
3832 * Perform a SIOCGIFCONF call. This structure will change
3833 * size eventually, and there is nothing I can do about it.
3834 * Thus we will need a 'compatibility mode'.
3835 */
3836
881d966b 3837static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3838{
3839 struct ifconf ifc;
3840 struct net_device *dev;
3841 char __user *pos;
3842 int len;
3843 int total;
3844 int i;
3845
3846 /*
3847 * Fetch the caller's info block.
3848 */
3849
3850 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3851 return -EFAULT;
3852
3853 pos = ifc.ifc_buf;
3854 len = ifc.ifc_len;
3855
3856 /*
3857 * Loop over the interfaces, and write an info block for each.
3858 */
3859
3860 total = 0;
881d966b 3861 for_each_netdev(net, dev) {
1da177e4
LT
3862 for (i = 0; i < NPROTO; i++) {
3863 if (gifconf_list[i]) {
3864 int done;
3865 if (!pos)
3866 done = gifconf_list[i](dev, NULL, 0);
3867 else
3868 done = gifconf_list[i](dev, pos + total,
3869 len - total);
3870 if (done < 0)
3871 return -EFAULT;
3872 total += done;
3873 }
3874 }
4ec93edb 3875 }
1da177e4
LT
3876
3877 /*
3878 * All done. Write the updated control block back to the caller.
3879 */
3880 ifc.ifc_len = total;
3881
3882 /*
3883 * Both BSD and Solaris return 0 here, so we do too.
3884 */
3885 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3886}
3887
3888#ifdef CONFIG_PROC_FS
3889/*
3890 * This is invoked by the /proc filesystem handler to display a device
3891 * in detail.
3892 */
7562f876 3893void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3894 __acquires(RCU)
1da177e4 3895{
e372c414 3896 struct net *net = seq_file_net(seq);
7562f876 3897 loff_t off;
1da177e4 3898 struct net_device *dev;
1da177e4 3899
c6d14c84 3900 rcu_read_lock();
7562f876
PE
3901 if (!*pos)
3902 return SEQ_START_TOKEN;
1da177e4 3903
7562f876 3904 off = 1;
c6d14c84 3905 for_each_netdev_rcu(net, dev)
7562f876
PE
3906 if (off++ == *pos)
3907 return dev;
1da177e4 3908
7562f876 3909 return NULL;
1da177e4
LT
3910}
3911
3912void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3913{
c6d14c84
ED
3914 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3915 first_net_device(seq_file_net(seq)) :
3916 next_net_device((struct net_device *)v);
3917
1da177e4 3918 ++*pos;
c6d14c84 3919 return rcu_dereference(dev);
1da177e4
LT
3920}
3921
3922void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3923 __releases(RCU)
1da177e4 3924{
c6d14c84 3925 rcu_read_unlock();
1da177e4
LT
3926}
3927
3928static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3929{
28172739
ED
3930 struct rtnl_link_stats64 temp;
3931 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 3932
be1f3c2c
BH
3933 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3934 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
3935 dev->name, stats->rx_bytes, stats->rx_packets,
3936 stats->rx_errors,
3937 stats->rx_dropped + stats->rx_missed_errors,
3938 stats->rx_fifo_errors,
3939 stats->rx_length_errors + stats->rx_over_errors +
3940 stats->rx_crc_errors + stats->rx_frame_errors,
3941 stats->rx_compressed, stats->multicast,
3942 stats->tx_bytes, stats->tx_packets,
3943 stats->tx_errors, stats->tx_dropped,
3944 stats->tx_fifo_errors, stats->collisions,
3945 stats->tx_carrier_errors +
3946 stats->tx_aborted_errors +
3947 stats->tx_window_errors +
3948 stats->tx_heartbeat_errors,
3949 stats->tx_compressed);
1da177e4
LT
3950}
3951
3952/*
3953 * Called from the PROCfs module. This now uses the new arbitrary sized
3954 * /proc/net interface to create /proc/net/dev
3955 */
3956static int dev_seq_show(struct seq_file *seq, void *v)
3957{
3958 if (v == SEQ_START_TOKEN)
3959 seq_puts(seq, "Inter-| Receive "
3960 " | Transmit\n"
3961 " face |bytes packets errs drop fifo frame "
3962 "compressed multicast|bytes packets errs "
3963 "drop fifo colls carrier compressed\n");
3964 else
3965 dev_seq_printf_stats(seq, v);
3966 return 0;
3967}
3968
dee42870 3969static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 3970{
dee42870 3971 struct softnet_data *sd = NULL;
1da177e4 3972
0c0b0aca 3973 while (*pos < nr_cpu_ids)
4ec93edb 3974 if (cpu_online(*pos)) {
dee42870 3975 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
3976 break;
3977 } else
3978 ++*pos;
dee42870 3979 return sd;
1da177e4
LT
3980}
3981
3982static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3983{
3984 return softnet_get_online(pos);
3985}
3986
3987static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3988{
3989 ++*pos;
3990 return softnet_get_online(pos);
3991}
3992
3993static void softnet_seq_stop(struct seq_file *seq, void *v)
3994{
3995}
3996
3997static int softnet_seq_show(struct seq_file *seq, void *v)
3998{
dee42870 3999 struct softnet_data *sd = v;
1da177e4 4000
0a9627f2 4001 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 4002 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 4003 0, 0, 0, 0, /* was fastroute */
dee42870 4004 sd->cpu_collision, sd->received_rps);
1da177e4
LT
4005 return 0;
4006}
4007
f690808e 4008static const struct seq_operations dev_seq_ops = {
1da177e4
LT
4009 .start = dev_seq_start,
4010 .next = dev_seq_next,
4011 .stop = dev_seq_stop,
4012 .show = dev_seq_show,
4013};
4014
4015static int dev_seq_open(struct inode *inode, struct file *file)
4016{
e372c414
DL
4017 return seq_open_net(inode, file, &dev_seq_ops,
4018 sizeof(struct seq_net_private));
1da177e4
LT
4019}
4020
9a32144e 4021static const struct file_operations dev_seq_fops = {
1da177e4
LT
4022 .owner = THIS_MODULE,
4023 .open = dev_seq_open,
4024 .read = seq_read,
4025 .llseek = seq_lseek,
e372c414 4026 .release = seq_release_net,
1da177e4
LT
4027};
4028
f690808e 4029static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
4030 .start = softnet_seq_start,
4031 .next = softnet_seq_next,
4032 .stop = softnet_seq_stop,
4033 .show = softnet_seq_show,
4034};
4035
4036static int softnet_seq_open(struct inode *inode, struct file *file)
4037{
4038 return seq_open(file, &softnet_seq_ops);
4039}
4040
9a32144e 4041static const struct file_operations softnet_seq_fops = {
1da177e4
LT
4042 .owner = THIS_MODULE,
4043 .open = softnet_seq_open,
4044 .read = seq_read,
4045 .llseek = seq_lseek,
4046 .release = seq_release,
4047};
4048
0e1256ff
SH
4049static void *ptype_get_idx(loff_t pos)
4050{
4051 struct packet_type *pt = NULL;
4052 loff_t i = 0;
4053 int t;
4054
4055 list_for_each_entry_rcu(pt, &ptype_all, list) {
4056 if (i == pos)
4057 return pt;
4058 ++i;
4059 }
4060
82d8a867 4061 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
4062 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4063 if (i == pos)
4064 return pt;
4065 ++i;
4066 }
4067 }
4068 return NULL;
4069}
4070
4071static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 4072 __acquires(RCU)
0e1256ff
SH
4073{
4074 rcu_read_lock();
4075 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4076}
4077
4078static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4079{
4080 struct packet_type *pt;
4081 struct list_head *nxt;
4082 int hash;
4083
4084 ++*pos;
4085 if (v == SEQ_START_TOKEN)
4086 return ptype_get_idx(0);
4087
4088 pt = v;
4089 nxt = pt->list.next;
4090 if (pt->type == htons(ETH_P_ALL)) {
4091 if (nxt != &ptype_all)
4092 goto found;
4093 hash = 0;
4094 nxt = ptype_base[0].next;
4095 } else
82d8a867 4096 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
4097
4098 while (nxt == &ptype_base[hash]) {
82d8a867 4099 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
4100 return NULL;
4101 nxt = ptype_base[hash].next;
4102 }
4103found:
4104 return list_entry(nxt, struct packet_type, list);
4105}
4106
4107static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 4108 __releases(RCU)
0e1256ff
SH
4109{
4110 rcu_read_unlock();
4111}
4112
0e1256ff
SH
4113static int ptype_seq_show(struct seq_file *seq, void *v)
4114{
4115 struct packet_type *pt = v;
4116
4117 if (v == SEQ_START_TOKEN)
4118 seq_puts(seq, "Type Device Function\n");
c346dca1 4119 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
4120 if (pt->type == htons(ETH_P_ALL))
4121 seq_puts(seq, "ALL ");
4122 else
4123 seq_printf(seq, "%04x", ntohs(pt->type));
4124
908cd2da
AD
4125 seq_printf(seq, " %-8s %pF\n",
4126 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
4127 }
4128
4129 return 0;
4130}
4131
4132static const struct seq_operations ptype_seq_ops = {
4133 .start = ptype_seq_start,
4134 .next = ptype_seq_next,
4135 .stop = ptype_seq_stop,
4136 .show = ptype_seq_show,
4137};
4138
4139static int ptype_seq_open(struct inode *inode, struct file *file)
4140{
2feb27db
PE
4141 return seq_open_net(inode, file, &ptype_seq_ops,
4142 sizeof(struct seq_net_private));
0e1256ff
SH
4143}
4144
4145static const struct file_operations ptype_seq_fops = {
4146 .owner = THIS_MODULE,
4147 .open = ptype_seq_open,
4148 .read = seq_read,
4149 .llseek = seq_lseek,
2feb27db 4150 .release = seq_release_net,
0e1256ff
SH
4151};
4152
4153
4665079c 4154static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
4155{
4156 int rc = -ENOMEM;
4157
881d966b 4158 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 4159 goto out;
881d966b 4160 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 4161 goto out_dev;
881d966b 4162 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 4163 goto out_softnet;
0e1256ff 4164
881d966b 4165 if (wext_proc_init(net))
457c4cbc 4166 goto out_ptype;
1da177e4
LT
4167 rc = 0;
4168out:
4169 return rc;
457c4cbc 4170out_ptype:
881d966b 4171 proc_net_remove(net, "ptype");
1da177e4 4172out_softnet:
881d966b 4173 proc_net_remove(net, "softnet_stat");
1da177e4 4174out_dev:
881d966b 4175 proc_net_remove(net, "dev");
1da177e4
LT
4176 goto out;
4177}
881d966b 4178
4665079c 4179static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
4180{
4181 wext_proc_exit(net);
4182
4183 proc_net_remove(net, "ptype");
4184 proc_net_remove(net, "softnet_stat");
4185 proc_net_remove(net, "dev");
4186}
4187
022cbae6 4188static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4189 .init = dev_proc_net_init,
4190 .exit = dev_proc_net_exit,
4191};
4192
4193static int __init dev_proc_init(void)
4194{
4195 return register_pernet_subsys(&dev_proc_ops);
4196}
1da177e4
LT
4197#else
4198#define dev_proc_init() 0
4199#endif /* CONFIG_PROC_FS */
4200
4201
4202/**
4203 * netdev_set_master - set up master/slave pair
4204 * @slave: slave device
4205 * @master: new master device
4206 *
4207 * Changes the master device of the slave. Pass %NULL to break the
4208 * bonding. The caller must hold the RTNL semaphore. On a failure
4209 * a negative errno code is returned. On success the reference counts
4210 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4211 * function returns zero.
4212 */
4213int netdev_set_master(struct net_device *slave, struct net_device *master)
4214{
4215 struct net_device *old = slave->master;
4216
4217 ASSERT_RTNL();
4218
4219 if (master) {
4220 if (old)
4221 return -EBUSY;
4222 dev_hold(master);
4223 }
4224
4225 slave->master = master;
4ec93edb 4226
283f2fe8
ED
4227 if (old) {
4228 synchronize_net();
1da177e4 4229 dev_put(old);
283f2fe8 4230 }
1da177e4
LT
4231 if (master)
4232 slave->flags |= IFF_SLAVE;
4233 else
4234 slave->flags &= ~IFF_SLAVE;
4235
4236 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4237 return 0;
4238}
d1b19dff 4239EXPORT_SYMBOL(netdev_set_master);
1da177e4 4240
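/*
 * Example (editorial sketch): a bonding-style driver enslaving a
 * device. bond_dev/slave_dev and the setup step are hypothetical;
 * note the RTNL requirement documented above.
 */
static int my_enslave(struct net_device *bond_dev,
                      struct net_device *slave_dev)
{
        int err;

        ASSERT_RTNL();

        err = netdev_set_master(slave_dev, bond_dev);
        if (err)
                return err;
        /* ... driver-specific slave setup ... */
        return 0;
}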
b6c40d68
PM
4241static void dev_change_rx_flags(struct net_device *dev, int flags)
4242{
d314774c
SH
4243 const struct net_device_ops *ops = dev->netdev_ops;
4244
4245 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4246 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4247}
4248
dad9b335 4249static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
4250{
4251 unsigned short old_flags = dev->flags;
8192b0c4
DH
4252 uid_t uid;
4253 gid_t gid;
1da177e4 4254
24023451
PM
4255 ASSERT_RTNL();
4256
dad9b335
WC
4257 dev->flags |= IFF_PROMISC;
4258 dev->promiscuity += inc;
4259 if (dev->promiscuity == 0) {
4260 /*
4261 * Avoid overflow.
4262 * If inc causes overflow, leave promisc untouched and return an error.
4263 */
4264 if (inc < 0)
4265 dev->flags &= ~IFF_PROMISC;
4266 else {
4267 dev->promiscuity -= inc;
4268 printk(KERN_WARNING "%s: promiscuity touches roof, "
4269 "set promiscuity failed, promiscuity feature "
4270 "of device might be broken.\n", dev->name);
4271 return -EOVERFLOW;
4272 }
4273 }
52609c0b 4274 if (dev->flags != old_flags) {
1da177e4
LT
4275 printk(KERN_INFO "device %s %s promiscuous mode\n",
4276 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 4277 "left");
8192b0c4
DH
4278 if (audit_enabled) {
4279 current_uid_gid(&uid, &gid);
7759db82
KHK
4280 audit_log(current->audit_context, GFP_ATOMIC,
4281 AUDIT_ANOM_PROMISCUOUS,
4282 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4283 dev->name, (dev->flags & IFF_PROMISC),
4284 (old_flags & IFF_PROMISC),
4285 audit_get_loginuid(current),
8192b0c4 4286 uid, gid,
7759db82 4287 audit_get_sessionid(current));
8192b0c4 4288 }
24023451 4289
b6c40d68 4290 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4291 }
dad9b335 4292 return 0;
1da177e4
LT
4293}
4294
4417da66
PM
4295/**
4296 * dev_set_promiscuity - update promiscuity count on a device
4297 * @dev: device
4298 * @inc: modifier
4299 *
4300 * Add or remove promiscuity from a device. While the count in the device
4301 * remains above zero the interface remains promiscuous. Once it hits zero
4302 * the device reverts to normal filtering operation. A negative inc
4303 * value is used to drop promiscuity on the device.
dad9b335 4304 * Return 0 if successful or a negative errno code on error.
4417da66 4305 */
dad9b335 4306int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
4307{
4308 unsigned short old_flags = dev->flags;
dad9b335 4309 int err;
4417da66 4310
dad9b335 4311 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4312 if (err < 0)
dad9b335 4313 return err;
4417da66
PM
4314 if (dev->flags != old_flags)
4315 dev_set_rx_mode(dev);
dad9b335 4316 return err;
4417da66 4317}
d1b19dff 4318EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 4319
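/*
 * Example (editorial sketch): a packet-capture style user of the
 * promiscuity count. Increments and decrements must balance, and the
 * RTNL must be held around each call.
 */
static int my_capture_start(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_promiscuity(dev, 1);      /* count up: enter promisc */
        rtnl_unlock();
        return err;
}

static void my_capture_stop(struct net_device *dev)
{
        rtnl_lock();
        dev_set_promiscuity(dev, -1);           /* count down: may leave */
        rtnl_unlock();
}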
1da177e4
LT
4320/**
4321 * dev_set_allmulti - update allmulti count on a device
4322 * @dev: device
4323 * @inc: modifier
4324 *
4325 * Add or remove reception of all multicast frames on a device. While the
4326 * count in the device remains above zero the interface keeps listening
4327 * to all multicast frames. Once it hits zero the device reverts to normal
4328 * filtering operation. A negative @inc value is used to drop the counter
4329 * when releasing a resource that needed all multicasts.
dad9b335 4330 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4331 */
4332
dad9b335 4333int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4334{
4335 unsigned short old_flags = dev->flags;
4336
24023451
PM
4337 ASSERT_RTNL();
4338
1da177e4 4339 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4340 dev->allmulti += inc;
4341 if (dev->allmulti == 0) {
4342 /*
4343 * Avoid overflow.
4344 * If inc causes overflow, leave allmulti untouched and return an error.
4345 */
4346 if (inc < 0)
4347 dev->flags &= ~IFF_ALLMULTI;
4348 else {
4349 dev->allmulti -= inc;
4350 printk(KERN_WARNING "%s: allmulti touches roof, "
4351 "set allmulti failed, allmulti feature of "
4352 "device might be broken.\n", dev->name);
4353 return -EOVERFLOW;
4354 }
4355 }
24023451 4356 if (dev->flags ^ old_flags) {
b6c40d68 4357 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4358 dev_set_rx_mode(dev);
24023451 4359 }
dad9b335 4360 return 0;
4417da66 4361}
d1b19dff 4362EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4363
4364/*
4365 * Upload unicast and multicast address lists to device and
4366 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4367 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4368 * are present.
4369 */
4370void __dev_set_rx_mode(struct net_device *dev)
4371{
d314774c
SH
4372 const struct net_device_ops *ops = dev->netdev_ops;
4373
4417da66
PM
4374 /* dev_open will call this function so the list will stay sane. */
4375 if (!(dev->flags&IFF_UP))
4376 return;
4377
4378 if (!netif_device_present(dev))
40b77c94 4379 return;
4417da66 4380
d314774c
SH
4381 if (ops->ndo_set_rx_mode)
4382 ops->ndo_set_rx_mode(dev);
4417da66
PM
4383 else {
4384 /* Unicast address changes may only happen under the rtnl,
4385 * therefore calling __dev_set_promiscuity here is safe.
4386 */
32e7bfc4 4387 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4388 __dev_set_promiscuity(dev, 1);
4389 dev->uc_promisc = 1;
32e7bfc4 4390 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4391 __dev_set_promiscuity(dev, -1);
4392 dev->uc_promisc = 0;
4393 }
4394
d314774c
SH
4395 if (ops->ndo_set_multicast_list)
4396 ops->ndo_set_multicast_list(dev);
4417da66
PM
4397 }
4398}
4399
4400void dev_set_rx_mode(struct net_device *dev)
4401{
b9e40857 4402 netif_addr_lock_bh(dev);
4417da66 4403 __dev_set_rx_mode(dev);
b9e40857 4404 netif_addr_unlock_bh(dev);
1da177e4
LT
4405}
4406
f0db275a
SH
4407/**
4408 * dev_get_flags - get flags reported to userspace
4409 * @dev: device
4410 *
4411 * Get the combination of flag bits exported through APIs to userspace.
4412 */
1da177e4
LT
4413unsigned dev_get_flags(const struct net_device *dev)
4414{
4415 unsigned flags;
4416
4417 flags = (dev->flags & ~(IFF_PROMISC |
4418 IFF_ALLMULTI |
b00055aa
SR
4419 IFF_RUNNING |
4420 IFF_LOWER_UP |
4421 IFF_DORMANT)) |
1da177e4
LT
4422 (dev->gflags & (IFF_PROMISC |
4423 IFF_ALLMULTI));
4424
b00055aa
SR
4425 if (netif_running(dev)) {
4426 if (netif_oper_up(dev))
4427 flags |= IFF_RUNNING;
4428 if (netif_carrier_ok(dev))
4429 flags |= IFF_LOWER_UP;
4430 if (netif_dormant(dev))
4431 flags |= IFF_DORMANT;
4432 }
1da177e4
LT
4433
4434 return flags;
4435}
d1b19dff 4436EXPORT_SYMBOL(dev_get_flags);
1da177e4 4437
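/*
 * Example (editorial sketch): testing the user-visible flag view
 * rather than dev->flags directly, so IFF_RUNNING reflects operstate.
 */
static bool my_link_usable(const struct net_device *dev)
{
        unsigned flags = dev_get_flags(dev);

        return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}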
bd380811 4438int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4439{
1da177e4 4440 int old_flags = dev->flags;
bd380811 4441 int ret;
1da177e4 4442
24023451
PM
4443 ASSERT_RTNL();
4444
1da177e4
LT
4445 /*
4446 * Set the flags on our device.
4447 */
4448
4449 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4450 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4451 IFF_AUTOMEDIA)) |
4452 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4453 IFF_ALLMULTI));
4454
4455 /*
4456 * Load in the correct multicast list now the flags have changed.
4457 */
4458
b6c40d68
PM
4459 if ((old_flags ^ flags) & IFF_MULTICAST)
4460 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4461
4417da66 4462 dev_set_rx_mode(dev);
1da177e4
LT
4463
4464 /*
4465 * Have we downed the interface? We handle IFF_UP ourselves
4466 * according to user attempts to set it, rather than blindly
4467 * setting it.
4468 */
4469
4470 ret = 0;
4471 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4472 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4473
4474 if (!ret)
4417da66 4475 dev_set_rx_mode(dev);
1da177e4
LT
4476 }
4477
1da177e4 4478 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4479 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4480
1da177e4
LT
4481 dev->gflags ^= IFF_PROMISC;
4482 dev_set_promiscuity(dev, inc);
4483 }
4484
4485 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4486 is important. Some (broken) drivers set IFF_PROMISC when
4487 IFF_ALLMULTI is requested, without asking us and without reporting.
4488 */
4489 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4490 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4491
1da177e4
LT
4492 dev->gflags ^= IFF_ALLMULTI;
4493 dev_set_allmulti(dev, inc);
4494 }
4495
bd380811
PM
4496 return ret;
4497}
4498
4499void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4500{
4501 unsigned int changes = dev->flags ^ old_flags;
4502
4503 if (changes & IFF_UP) {
4504 if (dev->flags & IFF_UP)
4505 call_netdevice_notifiers(NETDEV_UP, dev);
4506 else
4507 call_netdevice_notifiers(NETDEV_DOWN, dev);
4508 }
4509
4510 if (dev->flags & IFF_UP &&
4511 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4512 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4513}
4514
4515/**
4516 * dev_change_flags - change device settings
4517 * @dev: device
4518 * @flags: device state flags
4519 *
4520 * Change settings on device based state flags. The flags are
4521 * in the userspace exported format.
4522 */
4523int dev_change_flags(struct net_device *dev, unsigned flags)
4524{
4525 int ret, changes;
4526 int old_flags = dev->flags;
4527
4528 ret = __dev_change_flags(dev, flags);
4529 if (ret < 0)
4530 return ret;
4531
4532 changes = old_flags ^ dev->flags;
7c355f53
TG
4533 if (changes)
4534 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4535
bd380811 4536 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4537 return ret;
4538}
d1b19dff 4539EXPORT_SYMBOL(dev_change_flags);
1da177e4 4540
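/*
 * Example (editorial sketch): bringing an interface up from kernel
 * code, much as boot-time IP autoconfiguration does. The RTNL guards
 * the read-modify-write of dev->flags.
 */
static int my_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev->flags | IFF_UP);
        rtnl_unlock();
        return err;
}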
f0db275a
SH
4541/**
4542 * dev_set_mtu - Change maximum transfer unit
4543 * @dev: device
4544 * @new_mtu: new transfer unit
4545 *
4546 * Change the maximum transfer size of the network device.
4547 */
1da177e4
LT
4548int dev_set_mtu(struct net_device *dev, int new_mtu)
4549{
d314774c 4550 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4551 int err;
4552
4553 if (new_mtu == dev->mtu)
4554 return 0;
4555
4556 /* MTU must be positive. */
4557 if (new_mtu < 0)
4558 return -EINVAL;
4559
4560 if (!netif_device_present(dev))
4561 return -ENODEV;
4562
4563 err = 0;
d314774c
SH
4564 if (ops->ndo_change_mtu)
4565 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4566 else
4567 dev->mtu = new_mtu;
d314774c 4568
1da177e4 4569 if (!err && dev->flags & IFF_UP)
056925ab 4570 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4571 return err;
4572}
d1b19dff 4573EXPORT_SYMBOL(dev_set_mtu);
1da177e4 4574
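/*
 * Example (editorial sketch): switching a device to jumbo frames.
 * 9000 is an arbitrary value; the driver's ndo_change_mtu may still
 * reject it.
 */
static int my_enable_jumbo(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_mtu(dev, 9000);
        rtnl_unlock();
        return err;
}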
f0db275a
SH
4575/**
4576 * dev_set_mac_address - Change Media Access Control Address
4577 * @dev: device
4578 * @sa: new address
4579 *
4580 * Change the hardware (MAC) address of the device
4581 */
1da177e4
LT
4582int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4583{
d314774c 4584 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4585 int err;
4586
d314774c 4587 if (!ops->ndo_set_mac_address)
1da177e4
LT
4588 return -EOPNOTSUPP;
4589 if (sa->sa_family != dev->type)
4590 return -EINVAL;
4591 if (!netif_device_present(dev))
4592 return -ENODEV;
d314774c 4593 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4594 if (!err)
056925ab 4595 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4596 return err;
4597}
d1b19dff 4598EXPORT_SYMBOL(dev_set_mac_address);
1da177e4
LT
4599
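/*
 * Example (editorial sketch): programming a new hardware address.
 * new_mac is a hypothetical buffer of dev->addr_len bytes; sa_family
 * must match dev->type or the call fails with -EINVAL.
 */
static int my_set_mac(struct net_device *dev, const u8 *new_mac)
{
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;
        memcpy(sa.sa_data, new_mac, dev->addr_len);

        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}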
4600/*
3710becf 4601 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4602 */
14e3e079 4603static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4604{
4605 int err;
3710becf 4606 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4607
4608 if (!dev)
4609 return -ENODEV;
4610
4611 switch (cmd) {
d1b19dff
ED
4612 case SIOCGIFFLAGS: /* Get interface flags */
4613 ifr->ifr_flags = (short) dev_get_flags(dev);
4614 return 0;
1da177e4 4615
d1b19dff
ED
4616 case SIOCGIFMETRIC: /* Get the metric on the interface
4617 (currently unused) */
4618 ifr->ifr_metric = 0;
4619 return 0;
1da177e4 4620
d1b19dff
ED
4621 case SIOCGIFMTU: /* Get the MTU of a device */
4622 ifr->ifr_mtu = dev->mtu;
4623 return 0;
1da177e4 4624
d1b19dff
ED
4625 case SIOCGIFHWADDR:
4626 if (!dev->addr_len)
4627 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4628 else
4629 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4630 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4631 ifr->ifr_hwaddr.sa_family = dev->type;
4632 return 0;
1da177e4 4633
d1b19dff
ED
4634 case SIOCGIFSLAVE:
4635 err = -EINVAL;
4636 break;
14e3e079 4637
d1b19dff
ED
4638 case SIOCGIFMAP:
4639 ifr->ifr_map.mem_start = dev->mem_start;
4640 ifr->ifr_map.mem_end = dev->mem_end;
4641 ifr->ifr_map.base_addr = dev->base_addr;
4642 ifr->ifr_map.irq = dev->irq;
4643 ifr->ifr_map.dma = dev->dma;
4644 ifr->ifr_map.port = dev->if_port;
4645 return 0;
14e3e079 4646
d1b19dff
ED
4647 case SIOCGIFINDEX:
4648 ifr->ifr_ifindex = dev->ifindex;
4649 return 0;
14e3e079 4650
d1b19dff
ED
4651 case SIOCGIFTXQLEN:
4652 ifr->ifr_qlen = dev->tx_queue_len;
4653 return 0;
14e3e079 4654
d1b19dff
ED
4655 default:
4656 /* dev_ioctl() should ensure this case
4657 * is never reached
4658 */
4659 WARN_ON(1);
4660 err = -EINVAL;
4661 break;
14e3e079
JG
4662
4663 }
4664 return err;
4665}
4666
4667/*
4668 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4669 */
4670static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4671{
4672 int err;
4673 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4674 const struct net_device_ops *ops;
14e3e079
JG
4675
4676 if (!dev)
4677 return -ENODEV;
4678
5f2f6da7
JP
4679 ops = dev->netdev_ops;
4680
14e3e079 4681 switch (cmd) {
d1b19dff
ED
4682 case SIOCSIFFLAGS: /* Set interface flags */
4683 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4684
d1b19dff
ED
4685 case SIOCSIFMETRIC: /* Set the metric on the interface
4686 (currently unused) */
4687 return -EOPNOTSUPP;
14e3e079 4688
d1b19dff
ED
4689 case SIOCSIFMTU: /* Set the MTU of a device */
4690 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4691
d1b19dff
ED
4692 case SIOCSIFHWADDR:
4693 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4694
d1b19dff
ED
4695 case SIOCSIFHWBROADCAST:
4696 if (ifr->ifr_hwaddr.sa_family != dev->type)
4697 return -EINVAL;
4698 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4699 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4700 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4701 return 0;
1da177e4 4702
d1b19dff
ED
4703 case SIOCSIFMAP:
4704 if (ops->ndo_set_config) {
1da177e4
LT
4705 if (!netif_device_present(dev))
4706 return -ENODEV;
d1b19dff
ED
4707 return ops->ndo_set_config(dev, &ifr->ifr_map);
4708 }
4709 return -EOPNOTSUPP;
1da177e4 4710
d1b19dff
ED
4711 case SIOCADDMULTI:
4712 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4713 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4714 return -EINVAL;
4715 if (!netif_device_present(dev))
4716 return -ENODEV;
22bedad3 4717 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4718
4719 case SIOCDELMULTI:
4720 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4721 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4722 return -EINVAL;
4723 if (!netif_device_present(dev))
4724 return -ENODEV;
22bedad3 4725 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4726
d1b19dff
ED
4727 case SIOCSIFTXQLEN:
4728 if (ifr->ifr_qlen < 0)
4729 return -EINVAL;
4730 dev->tx_queue_len = ifr->ifr_qlen;
4731 return 0;
1da177e4 4732
d1b19dff
ED
4733 case SIOCSIFNAME:
4734 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4735 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4736
d1b19dff
ED
4737 /*
4738 * Unknown or private ioctl
4739 */
4740 default:
4741 if ((cmd >= SIOCDEVPRIVATE &&
4742 cmd <= SIOCDEVPRIVATE + 15) ||
4743 cmd == SIOCBONDENSLAVE ||
4744 cmd == SIOCBONDRELEASE ||
4745 cmd == SIOCBONDSETHWADDR ||
4746 cmd == SIOCBONDSLAVEINFOQUERY ||
4747 cmd == SIOCBONDINFOQUERY ||
4748 cmd == SIOCBONDCHANGEACTIVE ||
4749 cmd == SIOCGMIIPHY ||
4750 cmd == SIOCGMIIREG ||
4751 cmd == SIOCSMIIREG ||
4752 cmd == SIOCBRADDIF ||
4753 cmd == SIOCBRDELIF ||
4754 cmd == SIOCSHWTSTAMP ||
4755 cmd == SIOCWANDEV) {
4756 err = -EOPNOTSUPP;
4757 if (ops->ndo_do_ioctl) {
4758 if (netif_device_present(dev))
4759 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4760 else
4761 err = -ENODEV;
4762 }
4763 } else
4764 err = -EINVAL;
1da177e4
LT
4765
4766 }
4767 return err;
4768}
4769
4770/*
4771 * This function handles all "interface"-type I/O control requests. The actual
4772 * 'doing' part of this is dev_ifsioc above.
4773 */
4774
4775/**
4776 * dev_ioctl - network device ioctl
c4ea43c5 4777 * @net: the applicable net namespace
1da177e4
LT
4778 * @cmd: command to issue
4779 * @arg: pointer to a struct ifreq in user space
4780 *
4781 * Issue ioctl functions to devices. This is normally called by the
4782 * user space syscall interfaces but can sometimes be useful for
4783 * other purposes. The return value is the return from the syscall if
4784 * positive or a negative errno code on error.
4785 */
4786
881d966b 4787int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4788{
4789 struct ifreq ifr;
4790 int ret;
4791 char *colon;
4792
4793 /* One special case: SIOCGIFCONF takes ifconf argument
4794 and requires shared lock, because it sleeps writing
4795 to user space.
4796 */
4797
4798 if (cmd == SIOCGIFCONF) {
6756ae4b 4799 rtnl_lock();
881d966b 4800 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4801 rtnl_unlock();
1da177e4
LT
4802 return ret;
4803 }
4804 if (cmd == SIOCGIFNAME)
881d966b 4805 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4806
4807 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4808 return -EFAULT;
4809
4810 ifr.ifr_name[IFNAMSIZ-1] = 0;
4811
4812 colon = strchr(ifr.ifr_name, ':');
4813 if (colon)
4814 *colon = 0;
4815
4816 /*
4817 * See which interface the caller is talking about.
4818 */
4819
4820 switch (cmd) {
d1b19dff
ED
4821 /*
4822 * These ioctl calls:
4823 * - can be done by all.
4824 * - atomic and do not require locking.
4825 * - return a value
4826 */
4827 case SIOCGIFFLAGS:
4828 case SIOCGIFMETRIC:
4829 case SIOCGIFMTU:
4830 case SIOCGIFHWADDR:
4831 case SIOCGIFSLAVE:
4832 case SIOCGIFMAP:
4833 case SIOCGIFINDEX:
4834 case SIOCGIFTXQLEN:
4835 dev_load(net, ifr.ifr_name);
3710becf 4836 rcu_read_lock();
d1b19dff 4837 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4838 rcu_read_unlock();
d1b19dff
ED
4839 if (!ret) {
4840 if (colon)
4841 *colon = ':';
4842 if (copy_to_user(arg, &ifr,
4843 sizeof(struct ifreq)))
4844 ret = -EFAULT;
4845 }
4846 return ret;
1da177e4 4847
d1b19dff
ED
4848 case SIOCETHTOOL:
4849 dev_load(net, ifr.ifr_name);
4850 rtnl_lock();
4851 ret = dev_ethtool(net, &ifr);
4852 rtnl_unlock();
4853 if (!ret) {
4854 if (colon)
4855 *colon = ':';
4856 if (copy_to_user(arg, &ifr,
4857 sizeof(struct ifreq)))
4858 ret = -EFAULT;
4859 }
4860 return ret;
1da177e4 4861
d1b19dff
ED
4862 /*
4863 * These ioctl calls:
4864 * - require superuser power.
4865 * - require strict serialization.
4866 * - return a value
4867 */
4868 case SIOCGMIIPHY:
4869 case SIOCGMIIREG:
4870 case SIOCSIFNAME:
4871 if (!capable(CAP_NET_ADMIN))
4872 return -EPERM;
4873 dev_load(net, ifr.ifr_name);
4874 rtnl_lock();
4875 ret = dev_ifsioc(net, &ifr, cmd);
4876 rtnl_unlock();
4877 if (!ret) {
4878 if (colon)
4879 *colon = ':';
4880 if (copy_to_user(arg, &ifr,
4881 sizeof(struct ifreq)))
4882 ret = -EFAULT;
4883 }
4884 return ret;
1da177e4 4885
d1b19dff
ED
4886 /*
4887 * These ioctl calls:
4888 * - require superuser power.
4889 * - require strict serialization.
4890 * - do not return a value
4891 */
4892 case SIOCSIFFLAGS:
4893 case SIOCSIFMETRIC:
4894 case SIOCSIFMTU:
4895 case SIOCSIFMAP:
4896 case SIOCSIFHWADDR:
4897 case SIOCSIFSLAVE:
4898 case SIOCADDMULTI:
4899 case SIOCDELMULTI:
4900 case SIOCSIFHWBROADCAST:
4901 case SIOCSIFTXQLEN:
4902 case SIOCSMIIREG:
4903 case SIOCBONDENSLAVE:
4904 case SIOCBONDRELEASE:
4905 case SIOCBONDSETHWADDR:
4906 case SIOCBONDCHANGEACTIVE:
4907 case SIOCBRADDIF:
4908 case SIOCBRDELIF:
4909 case SIOCSHWTSTAMP:
4910 if (!capable(CAP_NET_ADMIN))
4911 return -EPERM;
4912 /* fall through */
4913 case SIOCBONDSLAVEINFOQUERY:
4914 case SIOCBONDINFOQUERY:
4915 dev_load(net, ifr.ifr_name);
4916 rtnl_lock();
4917 ret = dev_ifsioc(net, &ifr, cmd);
4918 rtnl_unlock();
4919 return ret;
4920
4921 case SIOCGIFMEM:
4922 /* Get the per device memory space. We can add this but
4923 * currently do not support it */
4924 case SIOCSIFMEM:
4925 /* Set the per device memory buffer space.
4926 * Not applicable in our case */
4927 case SIOCSIFLINK:
4928 return -EINVAL;
4929
4930 /*
4931 * Unknown or private ioctl.
4932 */
4933 default:
4934 if (cmd == SIOCWANDEV ||
4935 (cmd >= SIOCDEVPRIVATE &&
4936 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4937 dev_load(net, ifr.ifr_name);
1da177e4 4938 rtnl_lock();
881d966b 4939 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4940 rtnl_unlock();
d1b19dff
ED
4941 if (!ret && copy_to_user(arg, &ifr,
4942 sizeof(struct ifreq)))
4943 ret = -EFAULT;
1da177e4 4944 return ret;
d1b19dff
ED
4945 }
4946 /* Take care of Wireless Extensions */
4947 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4948 return wext_handle_ioctl(net, &ifr, cmd, arg);
4949 return -EINVAL;
1da177e4
LT
4950 }
4951}
4952
4953
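/*
 * Example (editorial sketch): the userspace view of one of the ioctls
 * routed through dev_ioctl(), reading an MTU with SIOCGIFMTU over any
 * socket:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int get_mtu(const char *name)
 *	{
 *		struct ifreq ifr;
 *		int mtu = -1;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			mtu = ifr.ifr_mtu;
 *		close(fd);
 *		return mtu;
 *	}
 */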
4954/**
4955 * dev_new_index - allocate an ifindex
c4ea43c5 4956 * @net: the applicable net namespace
1da177e4
LT
4957 *
4958 * Returns a suitable unique value for a new device interface
4959 * number. The caller must hold the rtnl semaphore or the
4960 * dev_base_lock to be sure it remains unique.
4961 */
881d966b 4962static int dev_new_index(struct net *net)
1da177e4
LT
4963{
4964 static int ifindex;
4965 for (;;) {
4966 if (++ifindex <= 0)
4967 ifindex = 1;
881d966b 4968 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4969 return ifindex;
4970 }
4971}
4972
1da177e4 4973/* Delayed registration/unregistration */
3b5b34fd 4974static LIST_HEAD(net_todo_list);
1da177e4 4975
6f05f629 4976static void net_set_todo(struct net_device *dev)
1da177e4 4977{
1da177e4 4978 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4979}
4980
9b5e383c 4981static void rollback_registered_many(struct list_head *head)
93ee31f1 4982{
e93737b0 4983 struct net_device *dev, *tmp;
9b5e383c 4984
93ee31f1
DL
4985 BUG_ON(dev_boot_phase);
4986 ASSERT_RTNL();
4987
e93737b0 4988 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4989 /* Some devices call without registering
e93737b0
KK
4990 * for initialization unwind. Remove those
4991 * devices and proceed with the remaining.
9b5e383c
ED
4992 */
4993 if (dev->reg_state == NETREG_UNINITIALIZED) {
4994 pr_debug("unregister_netdevice: device %s/%p never "
4995 "was registered\n", dev->name, dev);
93ee31f1 4996
9b5e383c 4997 WARN_ON(1);
e93737b0
KK
4998 list_del(&dev->unreg_list);
4999 continue;
9b5e383c 5000 }
93ee31f1 5001
9b5e383c 5002 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 5003 }
93ee31f1 5004
44345724
OP
5005 /* If device is running, close it first. */
5006 dev_close_many(head);
93ee31f1 5007
44345724 5008 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
5009 /* And unlink it from device chain. */
5010 unlist_netdevice(dev);
93ee31f1 5011
9b5e383c
ED
5012 dev->reg_state = NETREG_UNREGISTERING;
5013 }
93ee31f1
DL
5014
5015 synchronize_net();
5016
9b5e383c
ED
5017 list_for_each_entry(dev, head, unreg_list) {
5018 /* Shutdown queueing discipline. */
5019 dev_shutdown(dev);
93ee31f1
DL
5020
5021
9b5e383c
ED
5022 /* Notify protocols that we are about to destroy
5023 this device. They should clean all the things.
5024 */
5025 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 5026
a2835763
PM
5027 if (!dev->rtnl_link_ops ||
5028 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5029 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5030
9b5e383c
ED
5031 /*
5032 * Flush the unicast and multicast chains
5033 */
a748ee24 5034 dev_uc_flush(dev);
22bedad3 5035 dev_mc_flush(dev);
93ee31f1 5036
9b5e383c
ED
5037 if (dev->netdev_ops->ndo_uninit)
5038 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 5039
9b5e383c
ED
5040 /* Notifier chain MUST detach us from master device. */
5041 WARN_ON(dev->master);
93ee31f1 5042
9b5e383c
ED
5043 /* Remove entries from kobject tree */
5044 netdev_unregister_kobject(dev);
5045 }
93ee31f1 5046
a5ee1551 5047 /* Process any work delayed until the end of the batch */
e5e26d75 5048 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 5049 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 5050
ef885afb 5051 rcu_barrier();
395264d5 5052
a5ee1551 5053 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
5054 dev_put(dev);
5055}
5056
5057static void rollback_registered(struct net_device *dev)
5058{
5059 LIST_HEAD(single);
5060
5061 list_add(&dev->unreg_list, &single);
5062 rollback_registered_many(&single);
93ee31f1
DL
5063}
5064
b63365a2
HX
5065unsigned long netdev_fix_features(unsigned long features, const char *name)
5066{
5067 /* Fix illegal SG+CSUM combinations. */
5068 if ((features & NETIF_F_SG) &&
5069 !(features & NETIF_F_ALL_CSUM)) {
5070 if (name)
5071 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
5072 "checksum feature.\n", name);
5073 features &= ~NETIF_F_SG;
5074 }
5075
5076 /* TSO requires that SG is present as well. */
5077 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
5078 if (name)
5079 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
5080 "SG feature.\n", name);
5081 features &= ~NETIF_F_TSO;
5082 }
5083
5084 if (features & NETIF_F_UFO) {
79032644
MM
5085 /* maybe split UFO into V4 and V6? */
5086 if (!((features & NETIF_F_GEN_CSUM) ||
5087 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5088 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
b63365a2
HX
5089 if (name)
5090 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
79032644 5091 "since no checksum offload features.\n",
b63365a2
HX
5092 name);
5093 features &= ~NETIF_F_UFO;
5094 }
5095
5096 if (!(features & NETIF_F_SG)) {
5097 if (name)
5098 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5099 "since no NETIF_F_SG feature.\n", name);
5100 features &= ~NETIF_F_UFO;
5101 }
5102 }
5103
5104 return features;
5105}
5106EXPORT_SYMBOL(netdev_fix_features);
5107
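/*
 * Example (editorial sketch): a driver sanitizing its feature
 * wish-list at probe time, so illegal combinations such as TSO
 * without SG are dropped with a log notice.
 */
static void my_init_features(struct net_device *dev)
{
        dev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_UFO;
        dev->features = netdev_fix_features(dev->features, dev->name);
}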
fc4a7489
PM
5108/**
5109 * netif_stacked_transfer_operstate - transfer operstate
5110 * @rootdev: the root or lower level device to transfer state from
5111 * @dev: the device to transfer operstate to
5112 *
5113 * Transfer operational state from root to device. This is normally
5114 * called when a stacking relationship exists between the root
5115 * device and the device (a leaf device).
5116 */
5117void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5118 struct net_device *dev)
5119{
5120 if (rootdev->operstate == IF_OPER_DORMANT)
5121 netif_dormant_on(dev);
5122 else
5123 netif_dormant_off(dev);
5124
5125 if (netif_carrier_ok(rootdev)) {
5126 if (!netif_carrier_ok(dev))
5127 netif_carrier_on(dev);
5128 } else {
5129 if (netif_carrier_ok(dev))
5130 netif_carrier_off(dev);
5131 }
5132}
5133EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5134
bf264145 5135#ifdef CONFIG_RPS
1b4bf461
ED
5136static int netif_alloc_rx_queues(struct net_device *dev)
5137{
1b4bf461 5138 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 5139 struct netdev_rx_queue *rx;
1b4bf461 5140
bd25fa7b 5141 BUG_ON(count < 1);
1b4bf461 5142
bd25fa7b
TH
5143 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5144 if (!rx) {
5145 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5146 return -ENOMEM;
1b4bf461 5147 }
bd25fa7b
TH
5148 dev->_rx = rx;
5149
bd25fa7b 5150 for (i = 0; i < count; i++)
fe822240 5151 rx[i].dev = dev;
1b4bf461
ED
5152 return 0;
5153}
bf264145 5154#endif
1b4bf461 5155
aa942104
CG
5156static void netdev_init_one_queue(struct net_device *dev,
5157 struct netdev_queue *queue, void *_unused)
5158{
5159 /* Initialize queue lock */
5160 spin_lock_init(&queue->_xmit_lock);
5161 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5162 queue->xmit_lock_owner = -1;
b236da69 5163 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104
CG
5164 queue->dev = dev;
5165}
5166
e6484930
TH
5167static int netif_alloc_netdev_queues(struct net_device *dev)
5168{
5169 unsigned int count = dev->num_tx_queues;
5170 struct netdev_queue *tx;
5171
5172 BUG_ON(count < 1);
5173
5174 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5175 if (!tx) {
5176 pr_err("netdev: Unable to allocate %u tx queues.\n",
5177 count);
5178 return -ENOMEM;
5179 }
5180 dev->_tx = tx;
1d24eb48 5181
e6484930
TH
5182 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5183 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
5184
5185 return 0;
e6484930
TH
5186}
5187
1da177e4
LT
5188/**
5189 * register_netdevice - register a network device
5190 * @dev: device to register
5191 *
5192 * Take a completed network device structure and add it to the kernel
5193 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5194 * chain. 0 is returned on success. A negative errno code is returned
5195 * on a failure to set up the device, or if the name is a duplicate.
5196 *
5197 * Callers must hold the rtnl semaphore. You may want
5198 * register_netdev() instead of this.
5199 *
5200 * BUGS:
5201 * The locking appears insufficient to guarantee two parallel registers
5202 * will not get the same name.
5203 */
5204
5205int register_netdevice(struct net_device *dev)
5206{
1da177e4 5207 int ret;
d314774c 5208 struct net *net = dev_net(dev);
1da177e4
LT
5209
5210 BUG_ON(dev_boot_phase);
5211 ASSERT_RTNL();
5212
b17a7c17
SH
5213 might_sleep();
5214
1da177e4
LT
5215 /* When net_device's are persistent, this will be fatal. */
5216 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 5217 BUG_ON(!net);
1da177e4 5218
f1f28aa3 5219 spin_lock_init(&dev->addr_list_lock);
cf508b12 5220 netdev_set_addr_lockdep_class(dev);
1da177e4 5221
1da177e4
LT
5222 dev->iflink = -1;
5223
5224 /* Init, if this function is available */
d314774c
SH
5225 if (dev->netdev_ops->ndo_init) {
5226 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5227 if (ret) {
5228 if (ret > 0)
5229 ret = -EIO;
90833aa4 5230 goto out;
1da177e4
LT
5231 }
5232 }
4ec93edb 5233
8ce6cebc 5234 ret = dev_get_valid_name(dev, dev->name, 0);
d9031024 5235 if (ret)
7ce1b0ed 5236 goto err_uninit;
1da177e4 5237
881d966b 5238 dev->ifindex = dev_new_index(net);
1da177e4
LT
5239 if (dev->iflink == -1)
5240 dev->iflink = dev->ifindex;
5241
d212f87b
SH
5242 /* Fix illegal checksum combinations */
5243 if ((dev->features & NETIF_F_HW_CSUM) &&
5244 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5245 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5246 dev->name);
5247 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5248 }
5249
5250 if ((dev->features & NETIF_F_NO_CSUM) &&
5251 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5252 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5253 dev->name);
5254 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5255 }
5256
b63365a2 5257 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 5258
e5a4a72d
LB
5259 /* Enable software GSO if SG is supported. */
5260 if (dev->features & NETIF_F_SG)
5261 dev->features |= NETIF_F_GSO;
5262
c5256c51
ED
5263 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5264 * vlan_dev_init() will do the dev->features check, so these features
5265 * are enabled only if supported by underlying device.
16c3ea78 5266 */
c5256c51 5267 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
16c3ea78 5268
7ffbe3fd
JB
5269 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5270 ret = notifier_to_errno(ret);
5271 if (ret)
5272 goto err_uninit;
5273
8b41d188 5274 ret = netdev_register_kobject(dev);
b17a7c17 5275 if (ret)
7ce1b0ed 5276 goto err_uninit;
b17a7c17
SH
5277 dev->reg_state = NETREG_REGISTERED;
5278
1da177e4
LT
5279 /*
5280 * Default initial state at registry is that the
5281 * device is present.
5282 */
5283
5284 set_bit(__LINK_STATE_PRESENT, &dev->state);
5285
1da177e4 5286 dev_init_scheduler(dev);
1da177e4 5287 dev_hold(dev);
ce286d32 5288 list_netdevice(dev);
1da177e4
LT
5289
5290 /* Notify protocols that a new device appeared. */
056925ab 5291 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5292 ret = notifier_to_errno(ret);
93ee31f1
DL
5293 if (ret) {
5294 rollback_registered(dev);
5295 dev->reg_state = NETREG_UNREGISTERED;
5296 }
d90a909e
EB
5297 /*
5298 * Prevent userspace races by waiting until the network
5299 * device is fully setup before sending notifications.
5300 */
a2835763
PM
5301 if (!dev->rtnl_link_ops ||
5302 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5303 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
5304
5305out:
5306 return ret;
7ce1b0ed
HX
5307
5308err_uninit:
d314774c
SH
5309 if (dev->netdev_ops->ndo_uninit)
5310 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5311 goto out;
1da177e4 5312}
d1b19dff 5313EXPORT_SYMBOL(register_netdevice);
1da177e4 5314
937f1ba5
BH
5315/**
5316 * init_dummy_netdev - init a dummy network device for NAPI
5317 * @dev: device to init
5318 *
5319 * This takes a network device structure and initializes the minimum
5320 * number of fields so it can be used to schedule NAPI polls without
5321 * registering a full blown interface. This is to be used by drivers
5322 * that need to tie several hardware interfaces to a single NAPI
5323 * poll scheduler due to HW limitations.
5324 */
5325int init_dummy_netdev(struct net_device *dev)
5326{
5327 /* Clear everything. Note we don't initialize spinlocks
5328 * as they aren't supposed to be taken by any of the
5329 * NAPI code and this dummy netdev is supposed to be
5330 * only ever used for NAPI polls.
5331 */
5332 memset(dev, 0, sizeof(struct net_device));
5333
5334 /* make sure we BUG if trying to hit standard
5335 * register/unregister code path
5336 */
5337 dev->reg_state = NETREG_DUMMY;
5338
937f1ba5
BH
5339 /* NAPI wants this */
5340 INIT_LIST_HEAD(&dev->napi_list);
5341
5342 /* a dummy interface is started by default */
5343 set_bit(__LINK_STATE_PRESENT, &dev->state);
5344 set_bit(__LINK_STATE_START, &dev->state);
5345
29b4433d
ED
5346 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5347 * because users of this 'device' don't need to change
5348 * its refcount.
5349 */
5350
937f1ba5
BH
5351 return 0;
5352}
5353EXPORT_SYMBOL_GPL(init_dummy_netdev);
5354
5355
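/*
 * Example (editorial sketch): a wireless-style driver funnelling
 * several hardware queues through one NAPI context hung off a dummy
 * netdev. my_priv and my_poll are hypothetical.
 */
static void my_setup_napi(struct my_priv *priv)
{
        init_dummy_netdev(&priv->napi_dev);     /* never registered */
        netif_napi_add(&priv->napi_dev, &priv->napi, my_poll, 64);
        napi_enable(&priv->napi);
}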
1da177e4
LT
5356/**
5357 * register_netdev - register a network device
5358 * @dev: device to register
5359 *
5360 * Take a completed network device structure and add it to the kernel
5361 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5362 * chain. 0 is returned on success. A negative errno code is returned
5363 * on a failure to set up the device, or if the name is a duplicate.
5364 *
38b4da38 5365 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5366 * and expands the device name if you passed a format string to
5367 * alloc_netdev.
5368 */
5369int register_netdev(struct net_device *dev)
5370{
5371 int err;
5372
5373 rtnl_lock();
5374
5375 /*
5376 * If the name is a format string the caller wants us to do a
5377 * name allocation.
5378 */
5379 if (strchr(dev->name, '%')) {
5380 err = dev_alloc_name(dev, dev->name);
5381 if (err < 0)
5382 goto out;
5383 }
4ec93edb 5384
1da177e4
LT
5385 err = register_netdevice(dev);
5386out:
5387 rtnl_unlock();
5388 return err;
5389}
5390EXPORT_SYMBOL(register_netdev);
5391
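/*
 * Example (editorial sketch): the usual probe-time sequence around
 * register_netdev(). my_priv and my_netdev_ops are hypothetical;
 * alloc_etherdev() pre-fills the name with "eth%d" for expansion.
 */
static int my_probe(void)
{
        struct net_device *ndev;
        int err;

        ndev = alloc_etherdev(sizeof(struct my_priv));
        if (!ndev)
                return -ENOMEM;

        ndev->netdev_ops = &my_netdev_ops;

        err = register_netdev(ndev);
        if (err)
                free_netdev(ndev);
        return err;
}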
29b4433d
ED
5392int netdev_refcnt_read(const struct net_device *dev)
5393{
5394 int i, refcnt = 0;
5395
5396 for_each_possible_cpu(i)
5397 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5398 return refcnt;
5399}
5400EXPORT_SYMBOL(netdev_refcnt_read);
5401
1da177e4
LT
5402/*
5403 * netdev_wait_allrefs - wait until all references are gone.
5404 *
5405 * This is called when unregistering network devices.
5406 *
5407 * Any protocol or device that holds a reference should register
5408 * for netdevice notification, and clean up and put back the
5409 * reference if they receive an UNREGISTER event.
5410 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5411 * call dev_put.
1da177e4
LT
5412 */
5413static void netdev_wait_allrefs(struct net_device *dev)
5414{
5415 unsigned long rebroadcast_time, warning_time;
29b4433d 5416 int refcnt;
1da177e4 5417
e014debe
ED
5418 linkwatch_forget_dev(dev);
5419
1da177e4 5420 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
5421 refcnt = netdev_refcnt_read(dev);
5422
5423 while (refcnt != 0) {
1da177e4 5424 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5425 rtnl_lock();
1da177e4
LT
5426
5427 /* Rebroadcast unregister notification */
056925ab 5428 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5429 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5430 * should have already handled it the first time */
1da177e4
LT
5431
5432 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5433 &dev->state)) {
5434 /* We must not have linkwatch events
5435 * pending on unregister. If this
5436 * happens, we simply run the queue
5437 * unscheduled, resulting in a noop
5438 * for this device.
5439 */
5440 linkwatch_run_queue();
5441 }
5442
6756ae4b 5443 __rtnl_unlock();
1da177e4
LT
5444
5445 rebroadcast_time = jiffies;
5446 }
5447
5448 msleep(250);
5449
29b4433d
ED
5450 refcnt = netdev_refcnt_read(dev);
5451
1da177e4
LT
5452 if (time_after(jiffies, warning_time + 10 * HZ)) {
5453 printk(KERN_EMERG "unregister_netdevice: "
5454 "waiting for %s to become free. Usage "
5455 "count = %d\n",
29b4433d 5456 dev->name, refcnt);
1da177e4
LT
5457 warning_time = jiffies;
5458 }
5459 }
5460}
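
/* Editor's sketch, not part of dev.c, of the protocol-side rule the
 * comment above describes: a subsystem holding a device reference
 * registers a notifier and drops the reference on NETDEV_UNREGISTER so
 * netdev_wait_allrefs() can terminate. myproto_dev is hypothetical.
 */
static struct net_device *myproto_dev;	/* hypothetical held reference */

static int myproto_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == myproto_dev) {
		myproto_dev = NULL;
		dev_put(dev);		/* let netdev_wait_allrefs() finish */
	}
	return NOTIFY_DONE;
}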
5461
5462/* The sequence is:
5463 *
5464 * rtnl_lock();
5465 * ...
5466 * register_netdevice(x1);
5467 * register_netdevice(x2);
5468 * ...
5469 * unregister_netdevice(y1);
5470 * unregister_netdevice(y2);
5471 * ...
5472 * rtnl_unlock();
5473 * free_netdev(y1);
5474 * free_netdev(y2);
5475 *
58ec3b4d 5476 * We are invoked by rtnl_unlock().
1da177e4 5477 * This allows us to deal with problems:
b17a7c17 5478 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5479 * without deadlocking with linkwatch via keventd.
5480 * 2) Since we run with the RTNL semaphore not held, we can sleep
5481 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5482 *
5483 * We must not return until all unregister events added during
5484 * the interval the lock was held have been completed.
1da177e4 5485 */
1da177e4
LT
5486void netdev_run_todo(void)
5487{
626ab0e6 5488 struct list_head list;
1da177e4 5489
1da177e4 5490 /* Snapshot list, allow later requests */
626ab0e6 5491 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5492
5493 __rtnl_unlock();
626ab0e6 5494
1da177e4
LT
5495 while (!list_empty(&list)) {
5496 struct net_device *dev
e5e26d75 5497 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5498 list_del(&dev->todo_list);
5499
b17a7c17
SH
5500 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5501 printk(KERN_ERR "network todo '%s' but state %d\n",
5502 dev->name, dev->reg_state);
5503 dump_stack();
5504 continue;
5505 }
1da177e4 5506
b17a7c17 5507 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5508
152102c7 5509 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5510
b17a7c17 5511 netdev_wait_allrefs(dev);
1da177e4 5512
b17a7c17 5513 /* paranoia */
29b4433d 5514 BUG_ON(netdev_refcnt_read(dev));
95ae6b22 5515 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
198caeca 5516 WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
547b792c 5517 WARN_ON(dev->dn_ptr);
1da177e4 5518
b17a7c17
SH
5519 if (dev->destructor)
5520 dev->destructor(dev);
9093bbb2
SH
5521
5522 /* Free network device */
5523 kobject_put(&dev->dev.kobj);
1da177e4 5524 }
1da177e4
LT
5525}
5526
d83345ad
ED
5527/**
5528 * dev_txq_stats_fold - fold tx_queues stats
5529 * @dev: device to get statistics from
3cfde79c 5530 * @stats: struct rtnl_link_stats64 to hold results
d83345ad
ED
5531 */
5532void dev_txq_stats_fold(const struct net_device *dev,
3cfde79c 5533 struct rtnl_link_stats64 *stats)
d83345ad 5534{
bd27290a 5535 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
d83345ad
ED
5536 unsigned int i;
5537 struct netdev_queue *txq;
5538
5539 for (i = 0; i < dev->num_tx_queues; i++) {
5540 txq = netdev_get_tx_queue(dev, i);
bd27290a 5541 spin_lock_bh(&txq->_xmit_lock);
d83345ad
ED
5542 tx_bytes += txq->tx_bytes;
5543 tx_packets += txq->tx_packets;
5544 tx_dropped += txq->tx_dropped;
bd27290a 5545 spin_unlock_bh(&txq->_xmit_lock);
d83345ad
ED
5546 }
5547 if (tx_bytes || tx_packets || tx_dropped) {
5548 stats->tx_bytes = tx_bytes;
5549 stats->tx_packets = tx_packets;
5550 stats->tx_dropped = tx_dropped;
5551 }
5552}
5553EXPORT_SYMBOL(dev_txq_stats_fold);
5554
3cfde79c
BH
5555/* Convert net_device_stats to rtnl_link_stats64. They have the same
5556 * fields in the same order, with only the type differing.
5557 */
5558static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5559 const struct net_device_stats *netdev_stats)
5560{
5561#if BITS_PER_LONG == 64
5562 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5563 memcpy(stats64, netdev_stats, sizeof(*stats64));
5564#else
5565 size_t i, n = sizeof(*stats64) / sizeof(u64);
5566 const unsigned long *src = (const unsigned long *)netdev_stats;
5567 u64 *dst = (u64 *)stats64;
5568
5569 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5570 sizeof(*stats64) / sizeof(u64));
5571 for (i = 0; i < n; i++)
5572 dst[i] = src[i];
5573#endif
5574}
5575
eeda3fd6
SH
5576/**
5577 * dev_get_stats - get network device statistics
5578 * @dev: device to get statistics from
28172739 5579 * @storage: place to store stats
eeda3fd6 5580 *
d7753516
BH
5581 * Get network statistics from device. Return @storage.
5582 * The device driver may provide its own method by setting
5583 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5584 * otherwise the internal statistics structure is used.
eeda3fd6 5585 */
d7753516
BH
5586struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5587 struct rtnl_link_stats64 *storage)
7004bf25 5588{
eeda3fd6
SH
5589 const struct net_device_ops *ops = dev->netdev_ops;
5590
28172739
ED
5591 if (ops->ndo_get_stats64) {
5592 memset(storage, 0, sizeof(*storage));
caf586e5
ED
5593 ops->ndo_get_stats64(dev, storage);
5594 } else if (ops->ndo_get_stats) {
3cfde79c 5595 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
5596 } else {
5597 netdev_stats_to_stats64(storage, &dev->stats);
5598 dev_txq_stats_fold(dev, storage);
28172739 5599 }
caf586e5 5600 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
28172739 5601 return storage;
c45d286e 5602}
eeda3fd6 5603EXPORT_SYMBOL(dev_get_stats);
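
/* Editor's sketch, not part of dev.c: a driver keeping native 64-bit
 * counters can supply ndo_get_stats64, which dev_get_stats() prefers
 * over ndo_get_stats. struct mydrv_priv and its fields are hypothetical;
 * dev_get_stats() has already zeroed @storage before this is called.
 */
struct mydrv_priv {
	u64 rx_packets;			/* hypothetical 64-bit counters */
	u64 tx_packets;
};

static struct rtnl_link_stats64 *mydrv_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->rx_packets;
	storage->tx_packets = priv->tx_packets;
	return storage;
}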
c45d286e 5604
24824a09 5605struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 5606{
24824a09 5607 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 5608
24824a09
ED
5609#ifdef CONFIG_NET_CLS_ACT
5610 if (queue)
5611 return queue;
5612 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5613 if (!queue)
5614 return NULL;
5615 netdev_init_one_queue(dev, queue, NULL);
24824a09
ED
5616 queue->qdisc = &noop_qdisc;
5617 queue->qdisc_sleeping = &noop_qdisc;
5618 rcu_assign_pointer(dev->ingress_queue, queue);
5619#endif
5620 return queue;
bb949fbd
DM
5621}
5622
1da177e4 5623/**
f25f4e44 5624 * alloc_netdev_mq - allocate network device
1da177e4
LT
5625 * @sizeof_priv: size of private data to allocate space for
5626 * @name: device name format string
5627 * @setup: callback to initialize device
f25f4e44 5628 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5629 *
5630 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
5631 * and performs basic initialization. Also allocates subqueue structs
5632 * for each queue on the device at the end of the netdevice.
1da177e4 5633 */
f25f4e44
PWJ
5634struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5635 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 5636{
1da177e4 5637 struct net_device *dev;
7943986c 5638 size_t alloc_size;
1ce8e7b5 5639 struct net_device *p;
1da177e4 5640
b6fe17d6
SH
5641 BUG_ON(strlen(name) >= sizeof(dev->name));
5642
55513fb4
TH
5643 if (queue_count < 1) {
5644 pr_err("alloc_netdev: Unable to allocate device "
5645 "with zero queues.\n");
5646 return NULL;
5647 }
5648
fd2ea0a7 5649 alloc_size = sizeof(struct net_device);
d1643d24
AD
5650 if (sizeof_priv) {
5651 /* ensure 32-byte alignment of private area */
1ce8e7b5 5652 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
5653 alloc_size += sizeof_priv;
5654 }
5655 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 5656 alloc_size += NETDEV_ALIGN - 1;
1da177e4 5657
31380de9 5658 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 5659 if (!p) {
b6fe17d6 5660 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
5661 return NULL;
5662 }
1da177e4 5663
1ce8e7b5 5664 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 5665 dev->padded = (char *)dev - (char *)p;
ab9c73cc 5666
29b4433d
ED
5667 dev->pcpu_refcnt = alloc_percpu(int);
5668 if (!dev->pcpu_refcnt)
e6484930 5669 goto free_p;
ab9c73cc 5670
ab9c73cc 5671 if (dev_addr_init(dev))
29b4433d 5672 goto free_pcpu;
ab9c73cc 5673
22bedad3 5674 dev_mc_init(dev);
a748ee24 5675 dev_uc_init(dev);
ccffad25 5676
c346dca1 5677 dev_net_set(dev, &init_net);
1da177e4 5678
e8a0464c 5679 dev->num_tx_queues = queue_count;
fd2ea0a7 5680 dev->real_num_tx_queues = queue_count;
ed9af2e8
TH
5681 if (netif_alloc_netdev_queues(dev))
5682 goto free_pcpu;
e8a0464c 5683
df334545 5684#ifdef CONFIG_RPS
0a9627f2 5685 dev->num_rx_queues = queue_count;
62fe0b40 5686 dev->real_num_rx_queues = queue_count;
fe822240
TH
5687 if (netif_alloc_rx_queues(dev))
5688 goto free_pcpu;
df334545 5689#endif
0a9627f2 5690
82cc1a7a 5691 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 5692
15682bc4
PWJ
5693 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5694 dev->ethtool_ntuple_list.count = 0;
d565b0a1 5695 INIT_LIST_HEAD(&dev->napi_list);
9fdce099 5696 INIT_LIST_HEAD(&dev->unreg_list);
e014debe 5697 INIT_LIST_HEAD(&dev->link_watch_list);
93f154b5 5698 dev->priv_flags = IFF_XMIT_DST_RELEASE;
1da177e4
LT
5699 setup(dev);
5700 strcpy(dev->name, name);
5701 return dev;
ab9c73cc 5702
29b4433d
ED
5703free_pcpu:
5704 free_percpu(dev->pcpu_refcnt);
ed9af2e8 5705 kfree(dev->_tx);
fe822240
TH
5706#ifdef CONFIG_RPS
5707 kfree(dev->_rx);
5708#endif
5709
ab9c73cc
JP
5710free_p:
5711 kfree(p);
5712 return NULL;
1da177e4 5713}
f25f4e44 5714EXPORT_SYMBOL(alloc_netdev_mq);
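
/* Editor's sketch, not part of dev.c: a call site allocating an
 * Ethernet-style device with four TX (and, with RPS, four RX) queues
 * and a private area. struct mydrv_priv is hypothetical.
 */
static struct net_device *mydrv_create(void)
{
	/* private area sits after the struct; fetch it via netdev_priv() */
	return alloc_netdev_mq(sizeof(struct mydrv_priv), "myeth%d",
			       ether_setup, 4);
}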
1da177e4
LT
5715
5716/**
5717 * free_netdev - free network device
5718 * @dev: device
5719 *
4ec93edb
YH
5720 * This function does the last stage of destroying an allocated device
5721 * interface. The reference to the device object is released.
1da177e4
LT
5722 * If this is the last reference then it will be freed.
5723 */
5724void free_netdev(struct net_device *dev)
5725{
d565b0a1
HX
5726 struct napi_struct *p, *n;
5727
f3005d7f
DL
5728 release_net(dev_net(dev));
5729
e8a0464c 5730 kfree(dev->_tx);
fe822240
TH
5731#ifdef CONFIG_RPS
5732 kfree(dev->_rx);
5733#endif
e8a0464c 5734
24824a09
ED
5735 kfree(rcu_dereference_raw(dev->ingress_queue));
5736
f001fde5
JP
5737 /* Flush device addresses */
5738 dev_addr_flush(dev);
5739
15682bc4
PWJ
5740 /* Clear ethtool n-tuple list */
5741 ethtool_ntuple_flush(dev);
5742
d565b0a1
HX
5743 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5744 netif_napi_del(p);
5745
29b4433d
ED
5746 free_percpu(dev->pcpu_refcnt);
5747 dev->pcpu_refcnt = NULL;
5748
3041a069 5749 /* Compatibility with error handling in drivers */
1da177e4
LT
5750 if (dev->reg_state == NETREG_UNINITIALIZED) {
5751 kfree((char *)dev - dev->padded);
5752 return;
5753 }
5754
5755 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5756 dev->reg_state = NETREG_RELEASED;
5757
43cb76d9
GKH
5758 /* will free via device release */
5759 put_device(&dev->dev);
1da177e4 5760}
d1b19dff 5761EXPORT_SYMBOL(free_netdev);
4ec93edb 5762
f0db275a
SH
5763/**
5764 * synchronize_net - Synchronize with packet receive processing
5765 *
5766 * Wait for packets currently being received to be done.
5767 * Does not block later packets from starting.
5768 */
4ec93edb 5769void synchronize_net(void)
1da177e4
LT
5770{
5771 might_sleep();
fbd568a3 5772 synchronize_rcu();
1da177e4 5773}
d1b19dff 5774EXPORT_SYMBOL(synchronize_net);
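
/* Editor's sketch, not part of dev.c: after unpublishing a hypothetical
 * RX hook, synchronize_net() guarantees no packet still executing in the
 * receive path can see the old pointer, so backing state may be freed.
 */
static struct sk_buff *(*my_rx_hook)(struct sk_buff *);	/* hypothetical */

static void myproto_unhook(void)
{
	rcu_assign_pointer(my_rx_hook, NULL);	/* new packets miss the hook */
	synchronize_net();			/* in-flight receivers drain */
	/* state used by the hook may now be freed safely */
}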
1da177e4
LT
5775
5776/**
44a0873d 5777 * unregister_netdevice_queue - remove device from the kernel
1da177e4 5778 * @dev: device
44a0873d 5779 * @head: list
6ebfbc06 5780 *
1da177e4 5781 * This function shuts down a device interface and removes it
d59b54b1 5782 * from the kernel tables.
44a0873d 5783 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
5784 *
5785 * Callers must hold the rtnl semaphore. You may want
5786 * unregister_netdev() instead of this.
5787 */
5788
44a0873d 5789void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 5790{
a6620712
HX
5791 ASSERT_RTNL();
5792
44a0873d 5793 if (head) {
9fdce099 5794 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
5795 } else {
5796 rollback_registered(dev);
5797 /* Finish processing unregister after unlock */
5798 net_set_todo(dev);
5799 }
1da177e4 5800}
44a0873d 5801EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 5802
9b5e383c
ED
5803/**
5804 * unregister_netdevice_many - unregister many devices
5805 * @head: list of devices
9b5e383c
ED
5806 */
5807void unregister_netdevice_many(struct list_head *head)
5808{
5809 struct net_device *dev;
5810
5811 if (!list_empty(head)) {
5812 rollback_registered_many(head);
5813 list_for_each_entry(dev, head, unreg_list)
5814 net_set_todo(dev);
5815 }
5816}
63c8099d 5817EXPORT_SYMBOL(unregister_netdevice_many);
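
/* Editor's sketch, not part of dev.c: queueing several devices and
 * unregistering them together amortizes the notifier and RCU costs
 * across the batch. devs/n are hypothetical.
 */
static void mydrv_destroy_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();			/* caller must hold rtnl */
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* one batched teardown */
}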
9b5e383c 5818
1da177e4
LT
5819/**
5820 * unregister_netdev - remove device from the kernel
5821 * @dev: device
5822 *
5823 * This function shuts down a device interface and removes it
d59b54b1 5824 * from the kernel tables.
1da177e4
LT
5825 *
5826 * This is just a wrapper for unregister_netdevice that takes
5827 * the rtnl semaphore. In general you want to use this and not
5828 * unregister_netdevice.
5829 */
5830void unregister_netdev(struct net_device *dev)
5831{
5832 rtnl_lock();
5833 unregister_netdevice(dev);
5834 rtnl_unlock();
5835}
1da177e4
LT
5836EXPORT_SYMBOL(unregister_netdev);
5837
ce286d32
EB
5838/**
5839 * dev_change_net_namespace - move device to a different network namespace
5840 * @dev: device
5841 * @net: network namespace
5842 * @pat: If not NULL name pattern to try if the current device name
5843 * is already taken in the destination network namespace.
5844 *
5845 * This function shuts down a device interface and moves it
5846 * to a new network namespace. On success 0 is returned, on
5847 * a failure a negative errno code is returned.
5848 *
5849 * Callers must hold the rtnl semaphore.
5850 */
5851
5852int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5853{
ce286d32
EB
5854 int err;
5855
5856 ASSERT_RTNL();
5857
5858 /* Don't allow namespace local devices to be moved. */
5859 err = -EINVAL;
5860 if (dev->features & NETIF_F_NETNS_LOCAL)
5861 goto out;
5862
5863 /* Ensure the device has been registered */
5864 err = -EINVAL;
5865 if (dev->reg_state != NETREG_REGISTERED)
5866 goto out;
5867
5868 /* Get out if there is nothing to do */
5869 err = 0;
878628fb 5870 if (net_eq(dev_net(dev), net))
ce286d32
EB
5871 goto out;
5872
5873 /* Pick the destination device name, and ensure
5874 * we can use it in the destination network namespace.
5875 */
5876 err = -EEXIST;
d9031024 5877 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
5878 /* We get here if we can't use the current device name */
5879 if (!pat)
5880 goto out;
8ce6cebc 5881 if (dev_get_valid_name(dev, pat, 1))
ce286d32
EB
5882 goto out;
5883 }
5884
5885 /*
5886 * And now a mini version of register_netdevice/unregister_netdevice.
5887 */
5888
5889 /* If device is running close it first. */
9b772652 5890 dev_close(dev);
ce286d32
EB
5891
5892 /* And unlink it from device chain */
5893 err = -ENODEV;
5894 unlist_netdevice(dev);
5895
5896 synchronize_net();
5897
5898 /* Shutdown queueing discipline. */
5899 dev_shutdown(dev);
5900
5901 /* Notify protocols that we are about to destroy
5902 this device. They should clean all the things.
3b27e105
DL
5903
5904 Note that dev->reg_state stays at NETREG_REGISTERED.
5905 This is wanted because this way 8021q and macvlan know
5906 the device is just moving and can keep their slaves up.
ce286d32
EB
5907 */
5908 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5909 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
ce286d32
EB
5910
5911 /*
5912 * Flush the unicast and multicast chains
5913 */
a748ee24 5914 dev_uc_flush(dev);
22bedad3 5915 dev_mc_flush(dev);
ce286d32
EB
5916
5917 /* Actually switch the network namespace */
c346dca1 5918 dev_net_set(dev, net);
ce286d32 5919
ce286d32
EB
5920 /* If there is an ifindex conflict assign a new one */
5921 if (__dev_get_by_index(net, dev->ifindex)) {
5922 int iflink = (dev->iflink == dev->ifindex);
5923 dev->ifindex = dev_new_index(net);
5924 if (iflink)
5925 dev->iflink = dev->ifindex;
5926 }
5927
8b41d188 5928 /* Fixup kobjects */
a1b3f594 5929 err = device_rename(&dev->dev, dev->name);
8b41d188 5930 WARN_ON(err);
ce286d32
EB
5931
5932 /* Add the device back in the hashes */
5933 list_netdevice(dev);
5934
5935 /* Notify protocols that a new device appeared. */
5936 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5937
d90a909e
EB
5938 /*
5939 * Prevent userspace races by waiting until the network
5940 * device is fully setup before sending notifications.
5941 */
5942 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5943
ce286d32
EB
5944 synchronize_net();
5945 err = 0;
5946out:
5947 return err;
5948}
463d0183 5949EXPORT_SYMBOL_GPL(dev_change_net_namespace);
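
/* Editor's sketch, not part of dev.c: moving a device under rtnl, with
 * "dev%d" as the fallback pattern if the current name collides in the
 * target namespace.
 */
static int mydrv_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}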
ce286d32 5950
1da177e4
LT
5951static int dev_cpu_callback(struct notifier_block *nfb,
5952 unsigned long action,
5953 void *ocpu)
5954{
5955 struct sk_buff **list_skb;
1da177e4
LT
5956 struct sk_buff *skb;
5957 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5958 struct softnet_data *sd, *oldsd;
5959
8bb78442 5960 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
5961 return NOTIFY_OK;
5962
5963 local_irq_disable();
5964 cpu = smp_processor_id();
5965 sd = &per_cpu(softnet_data, cpu);
5966 oldsd = &per_cpu(softnet_data, oldcpu);
5967
5968 /* Find end of our completion_queue. */
5969 list_skb = &sd->completion_queue;
5970 while (*list_skb)
5971 list_skb = &(*list_skb)->next;
5972 /* Append completion queue from offline CPU. */
5973 *list_skb = oldsd->completion_queue;
5974 oldsd->completion_queue = NULL;
5975
1da177e4 5976 /* Append output queue from offline CPU. */
a9cbd588
CG
5977 if (oldsd->output_queue) {
5978 *sd->output_queue_tailp = oldsd->output_queue;
5979 sd->output_queue_tailp = oldsd->output_queue_tailp;
5980 oldsd->output_queue = NULL;
5981 oldsd->output_queue_tailp = &oldsd->output_queue;
5982 }
1da177e4
LT
5983
5984 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5985 local_irq_enable();
5986
5987 /* Process offline CPU's input_pkt_queue */
76cc8b13 5988 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
1da177e4 5989 netif_rx(skb);
76cc8b13 5990 input_queue_head_incr(oldsd);
fec5e652 5991 }
76cc8b13 5992 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6e7676c1 5993 netif_rx(skb);
76cc8b13
TH
5994 input_queue_head_incr(oldsd);
5995 }
1da177e4
LT
5996
5997 return NOTIFY_OK;
5998}
1da177e4
LT
5999
6000
7f353bf2 6001/**
b63365a2
HX
6002 * netdev_increment_features - increment feature set by one
6003 * @all: current feature set
6004 * @one: new feature set
6005 * @mask: mask feature set
7f353bf2
HX
6006 *
6007 * Computes a new feature set after adding a device with feature set
b63365a2
HX
6008 * @one to the master device with current feature set @all. Will not
6009 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 6010 */
b63365a2
HX
6011unsigned long netdev_increment_features(unsigned long all, unsigned long one,
6012 unsigned long mask)
6013{
6014 /* If device needs checksumming, downgrade to it. */
d1b19dff 6015 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
b63365a2
HX
6016 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
6017 else if (mask & NETIF_F_ALL_CSUM) {
6018 /* If one device supports v4/v6 checksumming, set for all. */
6019 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6020 !(all & NETIF_F_GEN_CSUM)) {
6021 all &= ~NETIF_F_ALL_CSUM;
6022 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6023 }
e2a6b852 6024
b63365a2
HX
6025 /* If one device supports hw checksumming, set for all. */
6026 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
6027 all &= ~NETIF_F_ALL_CSUM;
6028 all |= NETIF_F_HW_CSUM;
6029 }
6030 }
7f353bf2 6031
b63365a2 6032 one |= NETIF_F_ALL_CSUM;
7f353bf2 6033
b63365a2 6034 one |= all & NETIF_F_ONE_FOR_ALL;
d9f5950f 6035 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
b63365a2 6036 all |= one & mask & NETIF_F_ONE_FOR_ALL;
7f353bf2
HX
6037
6038 return all;
6039}
b63365a2 6040EXPORT_SYMBOL(netdev_increment_features);
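
/* Editor's sketch, modelled loosely on how a master device such as a
 * bond recomputes features over its slaves; not part of dev.c. The
 * struct my_slave layout, the starting feature set and the mask are
 * assumptions.
 */
struct my_slave {
	struct list_head list;
	struct net_device *dev;
};

static void my_master_compute_features(struct net_device *master,
				       struct list_head *slaves)
{
	unsigned long features = NETIF_F_ALL_CSUM | NETIF_F_SG;
	struct my_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     NETIF_F_ONE_FOR_ALL);
	master->features = features;
}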
7f353bf2 6041
30d97d35
PE
6042static struct hlist_head *netdev_create_hash(void)
6043{
6044 int i;
6045 struct hlist_head *hash;
6046
6047 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6048 if (hash != NULL)
6049 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6050 INIT_HLIST_HEAD(&hash[i]);
6051
6052 return hash;
6053}
6054
881d966b 6055/* Initialize per network namespace state */
4665079c 6056static int __net_init netdev_init(struct net *net)
881d966b 6057{
881d966b 6058 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 6059
30d97d35
PE
6060 net->dev_name_head = netdev_create_hash();
6061 if (net->dev_name_head == NULL)
6062 goto err_name;
881d966b 6063
30d97d35
PE
6064 net->dev_index_head = netdev_create_hash();
6065 if (net->dev_index_head == NULL)
6066 goto err_idx;
881d966b
EB
6067
6068 return 0;
30d97d35
PE
6069
6070err_idx:
6071 kfree(net->dev_name_head);
6072err_name:
6073 return -ENOMEM;
881d966b
EB
6074}
6075
f0db275a
SH
6076/**
6077 * netdev_drivername - network driver for the device
6078 * @dev: network device
6079 * @buffer: buffer for resulting name
6080 * @len: size of buffer
6081 *
6082 * Determine network driver for device.
6083 */
cf04a4c7 6084char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6579e57b 6085{
cf04a4c7
SH
6086 const struct device_driver *driver;
6087 const struct device *parent;
6579e57b
AV
6088
6089 if (len <= 0 || !buffer)
6090 return buffer;
6091 buffer[0] = 0;
6092
6093 parent = dev->dev.parent;
6094
6095 if (!parent)
6096 return buffer;
6097
6098 driver = parent->driver;
6099 if (driver && driver->name)
6100 strlcpy(buffer, driver->name, len);
6101 return buffer;
6102}
6103
256df2f3
JP
6104static int __netdev_printk(const char *level, const struct net_device *dev,
6105 struct va_format *vaf)
6106{
6107 int r;
6108
6109 if (dev && dev->dev.parent)
6110 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6111 netdev_name(dev), vaf);
6112 else if (dev)
6113 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6114 else
6115 r = printk("%s(NULL net_device): %pV", level, vaf);
6116
6117 return r;
6118}
6119
6120int netdev_printk(const char *level, const struct net_device *dev,
6121 const char *format, ...)
6122{
6123 struct va_format vaf;
6124 va_list args;
6125 int r;
6126
6127 va_start(args, format);
6128
6129 vaf.fmt = format;
6130 vaf.va = &args;
6131
6132 r = __netdev_printk(level, dev, &vaf);
6133 va_end(args);
6134
6135 return r;
6136}
6137EXPORT_SYMBOL(netdev_printk);
6138
6139#define define_netdev_printk_level(func, level) \
6140int func(const struct net_device *dev, const char *fmt, ...) \
6141{ \
6142 int r; \
6143 struct va_format vaf; \
6144 va_list args; \
6145 \
6146 va_start(args, fmt); \
6147 \
6148 vaf.fmt = fmt; \
6149 vaf.va = &args; \
6150 \
6151 r = __netdev_printk(level, dev, &vaf); \
6152 va_end(args); \
6153 \
6154 return r; \
6155} \
6156EXPORT_SYMBOL(func);
6157
6158define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6159define_netdev_printk_level(netdev_alert, KERN_ALERT);
6160define_netdev_printk_level(netdev_crit, KERN_CRIT);
6161define_netdev_printk_level(netdev_err, KERN_ERR);
6162define_netdev_printk_level(netdev_warn, KERN_WARNING);
6163define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6164define_netdev_printk_level(netdev_info, KERN_INFO);
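
/* Editor's sketch, not part of dev.c: the helpers generated above prefix
 * messages with the driver and device name, so callers pass only the
 * event text. mydrv_report_link is hypothetical.
 */
static void mydrv_report_link(struct net_device *dev, unsigned int mbps)
{
	if (!mbps)
		netdev_warn(dev, "link down\n");
	else
		netdev_info(dev, "link up, %u Mbps\n", mbps);
}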
6165
4665079c 6166static void __net_exit netdev_exit(struct net *net)
881d966b
EB
6167{
6168 kfree(net->dev_name_head);
6169 kfree(net->dev_index_head);
6170}
6171
022cbae6 6172static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
6173 .init = netdev_init,
6174 .exit = netdev_exit,
6175};
6176
4665079c 6177static void __net_exit default_device_exit(struct net *net)
ce286d32 6178{
e008b5fc 6179 struct net_device *dev, *aux;
ce286d32 6180 /*
e008b5fc 6181 * Push all migratable network devices back to the
ce286d32
EB
6182 * initial network namespace
6183 */
6184 rtnl_lock();
e008b5fc 6185 for_each_netdev_safe(net, dev, aux) {
ce286d32 6186 int err;
aca51397 6187 char fb_name[IFNAMSIZ];
ce286d32
EB
6188
6189 /* Ignore unmovable devices (e.g. loopback) */
6190 if (dev->features & NETIF_F_NETNS_LOCAL)
6191 continue;
6192
e008b5fc
EB
6193 /* Leave virtual devices for the generic cleanup */
6194 if (dev->rtnl_link_ops)
6195 continue;
d0c082ce 6196
ce286d32 6197 /* Push remaining network devices to init_net */
aca51397
PE
6198 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6199 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 6200 if (err) {
aca51397 6201 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
ce286d32 6202 __func__, dev->name, err);
aca51397 6203 BUG();
ce286d32
EB
6204 }
6205 }
6206 rtnl_unlock();
6207}
6208
04dc7f6b
EB
6209static void __net_exit default_device_exit_batch(struct list_head *net_list)
6210{
6211 /* At exit, all network devices must be removed from a network
6212 * namespace. Do this in the reverse order of registration.
6213 * Do this across as many network namespaces as possible to
6214 * improve batching efficiency.
6215 */
6216 struct net_device *dev;
6217 struct net *net;
6218 LIST_HEAD(dev_kill_list);
6219
6220 rtnl_lock();
6221 list_for_each_entry(net, net_list, exit_list) {
6222 for_each_netdev_reverse(net, dev) {
6223 if (dev->rtnl_link_ops)
6224 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6225 else
6226 unregister_netdevice_queue(dev, &dev_kill_list);
6227 }
6228 }
6229 unregister_netdevice_many(&dev_kill_list);
6230 rtnl_unlock();
6231}
6232
022cbae6 6233static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 6234 .exit = default_device_exit,
04dc7f6b 6235 .exit_batch = default_device_exit_batch,
ce286d32
EB
6236};
6237
1da177e4
LT
6238/*
6239 * Initialize the DEV module. At boot time this walks the device list and
6240 * unhooks any devices that fail to initialise (normally hardware not
6241 * present) and leaves us with a valid list of present and active devices.
6242 *
6243 */
6244
6245/*
6246 * This is called single threaded during boot, so no need
6247 * to take the rtnl semaphore.
6248 */
6249static int __init net_dev_init(void)
6250{
6251 int i, rc = -ENOMEM;
6252
6253 BUG_ON(!dev_boot_phase);
6254
1da177e4
LT
6255 if (dev_proc_init())
6256 goto out;
6257
8b41d188 6258 if (netdev_kobject_init())
1da177e4
LT
6259 goto out;
6260
6261 INIT_LIST_HEAD(&ptype_all);
82d8a867 6262 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
6263 INIT_LIST_HEAD(&ptype_base[i]);
6264
881d966b
EB
6265 if (register_pernet_subsys(&netdev_net_ops))
6266 goto out;
1da177e4
LT
6267
6268 /*
6269 * Initialise the packet receive queues.
6270 */
6271
6f912042 6272 for_each_possible_cpu(i) {
e36fa2f7 6273 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 6274
dee42870 6275 memset(sd, 0, sizeof(*sd));
e36fa2f7 6276 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 6277 skb_queue_head_init(&sd->process_queue);
e36fa2f7
ED
6278 sd->completion_queue = NULL;
6279 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588
CG
6280 sd->output_queue = NULL;
6281 sd->output_queue_tailp = &sd->output_queue;
df334545 6282#ifdef CONFIG_RPS
e36fa2f7
ED
6283 sd->csd.func = rps_trigger_softirq;
6284 sd->csd.info = sd;
6285 sd->csd.flags = 0;
6286 sd->cpu = i;
1e94d72f 6287#endif
0a9627f2 6288
e36fa2f7
ED
6289 sd->backlog.poll = process_backlog;
6290 sd->backlog.weight = weight_p;
6291 sd->backlog.gro_list = NULL;
6292 sd->backlog.gro_count = 0;
1da177e4
LT
6293 }
6294
1da177e4
LT
6295 dev_boot_phase = 0;
6296
505d4f73
EB
6297 /* The loopback device is special: if any other network device
6298 * is present in a network namespace, the loopback device must
6299 * be present. Since we now dynamically allocate and free the
6300 * loopback device, ensure this invariant is maintained by
6301 * keeping the loopback device as the first device on the
6302 * list of network devices, so that the loopback device
6303 * is the first device that appears and the last network device
6304 * that disappears.
6305 */
6306 if (register_pernet_device(&loopback_net_ops))
6307 goto out;
6308
6309 if (register_pernet_device(&default_device_ops))
6310 goto out;
6311
962cf36c
CM
6312 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6313 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
6314
6315 hotcpu_notifier(dev_cpu_callback, 0);
6316 dst_init();
6317 dev_mcast_init();
6318 rc = 0;
6319out:
6320 return rc;
6321}
6322
6323subsys_initcall(net_dev_init);
6324
e88721f8
KK
6325static int __init initialize_hashrnd(void)
6326{
0a9627f2 6327 get_random_bytes(&hashrnd, sizeof(hashrnd));
e88721f8
KK
6328 return 0;
6329}
6330
6331late_initcall_sync(initialize_hashrnd);
6332