/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif	/* CONFIG_NET_RADIO */
#include <asm/current.h>

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */

/*
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
struct net_device *dev_base;
static struct net_device **dev_tail = &dev_base;
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

/*
 *	Our notifier list
 */

static struct notifier_block *netdev_chain;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif


/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	For efficiency
 */

int netdev_nit;

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	and subsequent readers would get a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit++;
		list_add_rcu(&pt->list, &ptype_all);
	} else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit--;
		head = &ptype_all;
	} else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

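/*
 * Example (illustrative sketch only -- "my_proto_rcv" and "my_packet_type"
 * are hypothetical names, not part of this file): a module that wants to
 * see every incoming IPv4 frame would register, and later unregister, a
 * handler roughly like this.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume our reference
 *		return 0;
 *	}
 *
 *	static struct packet_type my_packet_type = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *	...
 *	dev_remove_pack(&my_packet_type);
 */
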
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

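/*
 * Example (hedged -- the values below are made up): booting with
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * stores irq 9 and I/O base 0x300 for "eth1" in dev_boot_setup, where a
 * probing driver can later pick the values up via netdev_boot_setup_check().
 */
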
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

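/*
 * Example (sketch; "eth0" is only illustrative): the hold/put discipline
 * callers of dev_get_by_name() are expected to follow.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *	if (dev) {
 *		// ... dev may be used safely here ...
 *		dev_put(dev);
 *	}
 */
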
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			break;
	return dev;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	for (dev = dev_base; dev; dev = dev->next) {
		if (dev->type == type) {
			dev_hold(dev);
			break;
		}
	}
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other
		 * "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

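/*
 * Example (sketch): asking for the next free "eth%d" slot.  If eth0 and
 * eth1 already exist, this returns 2 and sets dev->name to "eth2".
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;	// -EINVAL, -ENOMEM or -ENFILE
 */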

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; a format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	int err = 0;

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

	err = class_device_rename(&dev->class_dev, dev->name);
	if (!err) {
		hlist_del(&dev->name_hlist);
		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load - load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

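/*
 * Example (sketch): the ioctl path calls dev_load(ifr.ifr_name), so with a
 * modprobe alias such as "alias eth0 e100" configured, a hypothetical call
 *
 *	dev_load("eth0");
 *
 * will request_module("eth0") and let module loading bring in the right
 * driver when no such interface is registered yet.
 */
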
static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait until the poll really happens. Actually, the best place
	 * for this is inside dev->stop() after the device has stopped its
	 * irq engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		msleep(1);
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}


/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	err = notifier_chain_register(&netdev_chain, nb);
	if (!err) {
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
	rtnl_unlock();
	return err;
}

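/*
 * Example (sketch; "my_netdev_event" and "my_notifier" are hypothetical
 * names): logging every interface that comes up.
 *
 *	static int my_netdev_event(struct notifier_block *this,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */
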
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

void __net_timestamp(struct sk_buff *skb)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	skb_set_timestamp(skb, &tv);
}
EXPORT_SYMBOL(__net_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else {
		skb->tstamp.off_sec = 0;
		skb->tstamp.off_usec = 0;
	}
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb, int inward)
{
	unsigned int csum;
	int ret = 0, offset = skb->h.raw - skb->data;

	if (inward) {
		skb->ip_summed = CHECKSUM_NONE;
		goto out;
	}

	if (skb_cloned(skb)) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	if (offset > (int)skb->len)
		BUG();
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

	offset = skb->tail - skb->h.raw;
	if (offset <= 0)
		BUG();
	if (skb->csum + 2 > offset)
		BUG();

	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

1da177e4
LT
1124#ifdef CONFIG_HIGHMEM
1125/* Actually, we should eliminate this check as soon as we know, that:
1126 * 1. IOMMU is present and allows to map all the memory.
1127 * 2. No high memory really exists on this machine.
1128 */
1129
1130static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1131{
1132 int i;
1133
1134 if (dev->features & NETIF_F_HIGHDMA)
1135 return 0;
1136
1137 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1138 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1139 return 1;
1140
1141 return 0;
1142}
1143#else
1144#define illegal_highdma(dev, skb) (0)
1145#endif
1146
/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	struct skb_shared_info *ninfo;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = skb->end - skb->head + expand;
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
		BUG();

	/* Set up shinfo */
	ninfo = (struct skb_shared_info*)(data + size);
	atomic_set(&ninfo->dataref, 1);
	ninfo->tso_size = skb_shinfo(skb)->tso_size;
	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
	ninfo->nr_frags = 0;
	ninfo->frag_list = NULL;

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw   += offset;
	skb->nh.raw  += offset;
	skb->mac.raw += offset;
	skb->tail    += offset;
	skb->data    += offset;

	/* We are no longer a clone, even if we were. */
	skb->cloned    = 0;

	skb->tail     += skb->data_len;
	skb->data_len  = 0;
	return 0;
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		spin_lock(&dev->xmit_lock);		\
		dev->xmit_lock_owner = cpu;		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		dev->xmit_lock_owner = -1;		\
		spin_unlock(&dev->xmit_lock);		\
	}						\
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP))))
		if (skb_checksum_help(skb, 0))
			goto out_kfree_skb;

	spin_lock_prefetch(&dev->queue_lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	local_bh_disable();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */

	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);

		rc = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock; it is not prone to deadlocks.
	   (Shooting the noqueue qdisc instead would be even simpler 8))
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

			if (!netif_queue_stopped(dev)) {
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				rc = 0;
				if (!dev->hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	local_bh_enable();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	local_bh_enable();
	return rc;
}

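/*
 * Example (sketch): a caller sets the device and priority, then hands the
 * skb over; the skb is consumed whatever the return value is, so it must
 * not be touched afterwards.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;	// any valid priority
 *	rc = dev_queue_xmit(skb);		// no kfree_skb() after this
 */
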
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 1000;
int netdev_budget = 300;
int weight_p = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.off_sec)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when the CPU is congested, but it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

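/*
 * Example (sketch of a typical non-NAPI driver receive path): the driver
 * sets the packet type before posting the buffer, as noted in the
 * changelog at the top of this file.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */
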
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static inline struct net_device *skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master)
		skb->dev = dev->master;

	return dev;
}

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev,
				  struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);

static __inline__ int handle_bridge(struct sk_buff **pskb,
				    struct packet_type **pt_prev, int *ret,
				    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
		return 0;

	if (*pt_prev) {
		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, pskb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise a few useless instructions
 * (a compare and 2 stores) are executed if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
			printk("Redir loop detected Dropping packet (%s->%s)\n",
			       skb->input_dev->name, skb->dev->name);
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);

		skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

		spin_lock(&dev->ingress_lock);
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&dev->ingress_lock);

	}

	return result;
}
#endif

int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	int ret = NET_RX_DROP;
	unsigned short type;

	/* if we've gotten here through NAPI, check netpoll */
	if (skb->dev->poll && netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.off_sec)
		net_timestamp(skb);

	if (!skb->input_dev)
		skb->input_dev = skb->dev;

	orig_dev = skb_bond(skb);

	__get_cpu_var(netdev_rx_stat).total++;

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	if (pt_prev) {
		ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = NULL; /* no one else should process this after */
	} else {
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	ret = ing_filter(skb);

	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
		kfree_skb(skb);
		goto out;
	}

	skb->tc_verd = 0;
ncls:
#endif

	handle_diverter(skb);

	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	backlog_dev->weight = weight_p;
	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;

	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	local_irq_enable();
	return 0;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);
		have = netpoll_poll_lock(dev);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			netpoll_poll_unlock(have);
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			netpoll_poll_unlock(have);
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf - register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for (dev = dev_base; dev; dev = dev->next) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static __inline__ struct net_device *dev_get_idx(loff_t pos)
{
	struct net_device *dev;
	loff_t i;

	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);

	return i == pos ? dev : NULL;
}

void *dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&dev_base_lock);
	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);

		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
			   dev->name, stats->rx_bytes, stats->rx_packets,
			   stats->rx_errors,
			   stats->rx_dropped + stats->rx_missed_errors,
			   stats->rx_fifo_errors,
			   stats->rx_length_errors + stats->rx_over_errors +
			     stats->rx_crc_errors + stats->rx_frame_errors,
			   stats->rx_compressed, stats->multicast,
			   stats->tx_bytes, stats->tx_packets,
			   stats->tx_errors, stats->tx_dropped,
			   stats->tx_fifo_errors, stats->collisions,
			   stats->tx_carrier_errors +
			     stats->tx_aborted_errors +
			     stats->tx_window_errors +
			     stats->tx_heartbeat_errors,
			   stats->tx_compressed);
	} else
		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}

static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#ifdef WIRELESS_EXT
extern int wireless_proc_init(void);
#else
#define wireless_proc_init() 0
#endif

static int __init dev_proc_init(void)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (wireless_proc_init())
		goto out_softnet;
	rc = 0;
out:
	return rc;
out_softnet:
	proc_net_remove("softnet_stat");
out_dev:
	proc_net_remove("dev");
	goto out;
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
2061
2062
2063/**
2064 * netdev_set_master - set up master/slave pair
2065 * @slave: slave device
2066 * @master: new master device
2067 *
2068 * Changes the master device of the slave. Pass %NULL to break the
2069 * bonding. The caller must hold the RTNL semaphore. On a failure
2070 * a negative errno code is returned. On success the reference counts
2071 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2072 * function returns zero.
2073 */
2074int netdev_set_master(struct net_device *slave, struct net_device *master)
2075{
2076 struct net_device *old = slave->master;
2077
2078 ASSERT_RTNL();
2079
2080 if (master) {
2081 if (old)
2082 return -EBUSY;
2083 dev_hold(master);
2084 }
2085
2086 slave->master = master;
2087
2088 synchronize_net();
2089
2090 if (old)
2091 dev_put(old);
2092
2093 if (master)
2094 slave->flags |= IFF_SLAVE;
2095 else
2096 slave->flags &= ~IFF_SLAVE;
2097
2098 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2099 return 0;
2100}
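
/*
 * Example (an illustrative sketch with hypothetical bond_dev and
 * port_dev variables, not lifted from a real driver): how a bonding
 * style driver would enslave and later release a port. RTNL must be
 * held around both calls, as documented above.
 *
 *	rtnl_lock();
 *	err = netdev_set_master(port_dev, bond_dev);	enslave
 *	...
 *	netdev_set_master(port_dev, NULL);		break the bond
 *	rtnl_unlock();
 */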
2101
2102/**
2103 * dev_set_promiscuity - update promiscuity count on a device
2104 * @dev: device
2105 * @inc: modifier
2106 *
2107 * Add or remove promiscuity from a device. While the count in the device
2108 * remains above zero the interface remains promiscuous. Once it hits zero
2109 * the device reverts to normal filtering operation. A negative @inc
2110 * value is used to drop promiscuity on the device.
2111 */
2112void dev_set_promiscuity(struct net_device *dev, int inc)
2113{
2114 unsigned short old_flags = dev->flags;
2115
2116 if ((dev->promiscuity += inc) == 0)
2117 dev->flags &= ~IFF_PROMISC;
2118 else
2119 dev->flags |= IFF_PROMISC;
2120 if (dev->flags != old_flags) {
2121 dev_mc_upload(dev);
2122 printk(KERN_INFO "device %s %s promiscuous mode\n",
2123 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2124 "left");
2125 }
2126}
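
/*
 * Because promiscuity is a counter rather than a flag, independent
 * users compose safely: two packet taps on the same device each bump
 * and drop the count, and IFF_PROMISC is only cleared when the last
 * one lets go. Sketch (illustrative only):
 *
 *	dev_set_promiscuity(dev, 1);	on tap attach
 *	...
 *	dev_set_promiscuity(dev, -1);	on tap detach
 */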
2127
2128/**
2129 * dev_set_allmulti - update allmulti count on a device
2130 * @dev: device
2131 * @inc: modifier
2132 *
2133 * Add or remove reception of all multicast frames to a device. While the
2134 * count in the device remains above zero the interface remains listening
2135 * to all multicast frames. Once it hits zero the device reverts to normal
2136 * filtering operation. A negative @inc value is used to drop the counter
2137 * when releasing a resource needing all multicasts.
2138 */
2139
2140void dev_set_allmulti(struct net_device *dev, int inc)
2141{
2142 unsigned short old_flags = dev->flags;
2143
2144 dev->flags |= IFF_ALLMULTI;
2145 if ((dev->allmulti += inc) == 0)
2146 dev->flags &= ~IFF_ALLMULTI;
2147 if (dev->flags ^ old_flags)
2148 dev_mc_upload(dev);
2149}
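
/*
 * Same counting scheme as dev_set_promiscuity(): e.g. a multicast
 * routing user would hold the device in allmulti for its lifetime
 * (illustrative sketch):
 *
 *	dev_set_allmulti(dev, 1);	start receiving all multicasts
 *	...
 *	dev_set_allmulti(dev, -1);	release the counter
 */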
2150
2151unsigned dev_get_flags(const struct net_device *dev)
2152{
2153 unsigned flags;
2154
2155 flags = (dev->flags & ~(IFF_PROMISC |
2156 IFF_ALLMULTI |
2157 IFF_RUNNING)) |
2158 (dev->gflags & (IFF_PROMISC |
2159 IFF_ALLMULTI));
2160
2161 if (netif_running(dev) && netif_carrier_ok(dev))
2162 flags |= IFF_RUNNING;
2163
2164 return flags;
2165}
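
/*
 * Note that IFF_RUNNING is synthesized here from the operational
 * state (netif_running() plus carrier), and the user-requested
 * IFF_PROMISC/IFF_ALLMULTI bits come from gflags rather than from
 * dev->flags; this is what SIOCGIFFLAGS reports to user space.
 */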
2166
2167int dev_change_flags(struct net_device *dev, unsigned flags)
2168{
2169 int ret;
2170 int old_flags = dev->flags;
2171
2172 /*
2173 * Set the flags on our device.
2174 */
2175
2176 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2177 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2178 IFF_AUTOMEDIA)) |
2179 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2180 IFF_ALLMULTI));
2181
2182 /*
2183 * Load in the correct multicast list now the flags have changed.
2184 */
2185
2186 dev_mc_upload(dev);
2187
2188 /*
2189 * Have we downed the interface? We handle IFF_UP ourselves
2190 * according to user attempts to set it, rather than blindly
2191 * setting it.
2192 */
2193
2194 ret = 0;
2195 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2196 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2197
2198 if (!ret)
2199 dev_mc_upload(dev);
2200 }
2201
2202 if (dev->flags & IFF_UP &&
2203 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2204 IFF_VOLATILE)))
2205 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2206
2207 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2208 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2209 dev->gflags ^= IFF_PROMISC;
2210 dev_set_promiscuity(dev, inc);
2211 }
2212
2213 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2214 is important. Some (broken) drivers set IFF_PROMISC themselves when
2215 IFF_ALLMULTI is requested, without asking us and without reporting it.
2216 */
2217 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2218 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2219 dev->gflags ^= IFF_ALLMULTI;
2220 dev_set_allmulti(dev, inc);
2221 }
2222
2223 if (old_flags ^ dev->flags)
2224 rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2225
2226 return ret;
2227}
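
/*
 * Worked example of the gflags reconciliation above (illustrative):
 * if user space sets IFF_PROMISC via SIOCSIFFLAGS, the bit differs
 * from dev->gflags, so gflags is toggled and dev_set_promiscuity()
 * is called with +1; clearing the bit later calls it with -1. A
 * driver or tap that bumped the promiscuity count independently is
 * therefore unaffected by the user toggling the flag.
 */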
2228
2229int dev_set_mtu(struct net_device *dev, int new_mtu)
2230{
2231 int err;
2232
2233 if (new_mtu == dev->mtu)
2234 return 0;
2235
2236 /* MTU must be positive. */
2237 if (new_mtu < 0)
2238 return -EINVAL;
2239
2240 if (!netif_device_present(dev))
2241 return -ENODEV;
2242
2243 err = 0;
2244 if (dev->change_mtu)
2245 err = dev->change_mtu(dev, new_mtu);
2246 else
2247 dev->mtu = new_mtu;
2248 if (!err && dev->flags & IFF_UP)
2249 notifier_call_chain(&netdev_chain,
2250 NETDEV_CHANGEMTU, dev);
2251 return err;
2252}
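
/*
 * A typical Ethernet driver's change_mtu hook, sketched for
 * illustration (the 68..1500 bounds are the usual ethernet limits,
 * and example_change_mtu is a hypothetical name):
 *
 *	static int example_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		if (new_mtu < 68 || new_mtu > 1500)
 *			return -EINVAL;
 *		dev->mtu = new_mtu;
 *		return 0;
 *	}
 *
 * Drivers without a change_mtu hook simply have dev->mtu updated
 * directly, as above.
 */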
2253
2254int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2255{
2256 int err;
2257
2258 if (!dev->set_mac_address)
2259 return -EOPNOTSUPP;
2260 if (sa->sa_family != dev->type)
2261 return -EINVAL;
2262 if (!netif_device_present(dev))
2263 return -ENODEV;
2264 err = dev->set_mac_address(dev, sa);
2265 if (!err)
2266 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
2267 return err;
2268}
2269
2270/*
2271 * Perform the SIOCxIFxxx calls.
2272 */
2273static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2274{
2275 int err;
2276 struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2277
2278 if (!dev)
2279 return -ENODEV;
2280
2281 switch (cmd) {
2282 case SIOCGIFFLAGS: /* Get interface flags */
2283 ifr->ifr_flags = dev_get_flags(dev);
2284 return 0;
2285
2286 case SIOCSIFFLAGS: /* Set interface flags */
2287 return dev_change_flags(dev, ifr->ifr_flags);
2288
2289 case SIOCGIFMETRIC: /* Get the metric on the interface
2290 (currently unused) */
2291 ifr->ifr_metric = 0;
2292 return 0;
2293
2294 case SIOCSIFMETRIC: /* Set the metric on the interface
2295 (currently unused) */
2296 return -EOPNOTSUPP;
2297
2298 case SIOCGIFMTU: /* Get the MTU of a device */
2299 ifr->ifr_mtu = dev->mtu;
2300 return 0;
2301
2302 case SIOCSIFMTU: /* Set the MTU of a device */
2303 return dev_set_mtu(dev, ifr->ifr_mtu);
2304
2305 case SIOCGIFHWADDR:
2306 if (!dev->addr_len)
2307 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
2308 else
2309 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2310 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2311 ifr->ifr_hwaddr.sa_family = dev->type;
2312 return 0;
2313
2314 case SIOCSIFHWADDR:
2315 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
2316
2317 case SIOCSIFHWBROADCAST:
2318 if (ifr->ifr_hwaddr.sa_family != dev->type)
2319 return -EINVAL;
2320 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2321 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2322 notifier_call_chain(&netdev_chain,
2323 NETDEV_CHANGEADDR, dev);
2324 return 0;
2325
2326 case SIOCGIFMAP:
2327 ifr->ifr_map.mem_start = dev->mem_start;
2328 ifr->ifr_map.mem_end = dev->mem_end;
2329 ifr->ifr_map.base_addr = dev->base_addr;
2330 ifr->ifr_map.irq = dev->irq;
2331 ifr->ifr_map.dma = dev->dma;
2332 ifr->ifr_map.port = dev->if_port;
2333 return 0;
2334
2335 case SIOCSIFMAP:
2336 if (dev->set_config) {
2337 if (!netif_device_present(dev))
2338 return -ENODEV;
2339 return dev->set_config(dev, &ifr->ifr_map);
2340 }
2341 return -EOPNOTSUPP;
2342
2343 case SIOCADDMULTI:
2344 if (!dev->set_multicast_list ||
2345 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2346 return -EINVAL;
2347 if (!netif_device_present(dev))
2348 return -ENODEV;
2349 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2350 dev->addr_len, 1);
2351
2352 case SIOCDELMULTI:
2353 if (!dev->set_multicast_list ||
2354 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2355 return -EINVAL;
2356 if (!netif_device_present(dev))
2357 return -ENODEV;
2358 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2359 dev->addr_len, 1);
2360
2361 case SIOCGIFINDEX:
2362 ifr->ifr_ifindex = dev->ifindex;
2363 return 0;
2364
2365 case SIOCGIFTXQLEN:
2366 ifr->ifr_qlen = dev->tx_queue_len;
2367 return 0;
2368
2369 case SIOCSIFTXQLEN:
2370 if (ifr->ifr_qlen < 0)
2371 return -EINVAL;
2372 dev->tx_queue_len = ifr->ifr_qlen;
2373 return 0;
2374
2375 case SIOCSIFNAME:
2376 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2377 return dev_change_name(dev, ifr->ifr_newname);
2378
2379 /*
2380 * Unknown or private ioctl
2381 */
2382
2383 default:
2384 if ((cmd >= SIOCDEVPRIVATE &&
2385 cmd <= SIOCDEVPRIVATE + 15) ||
2386 cmd == SIOCBONDENSLAVE ||
2387 cmd == SIOCBONDRELEASE ||
2388 cmd == SIOCBONDSETHWADDR ||
2389 cmd == SIOCBONDSLAVEINFOQUERY ||
2390 cmd == SIOCBONDINFOQUERY ||
2391 cmd == SIOCBONDCHANGEACTIVE ||
2392 cmd == SIOCGMIIPHY ||
2393 cmd == SIOCGMIIREG ||
2394 cmd == SIOCSMIIREG ||
2395 cmd == SIOCBRADDIF ||
2396 cmd == SIOCBRDELIF ||
2397 cmd == SIOCWANDEV) {
2398 err = -EOPNOTSUPP;
2399 if (dev->do_ioctl) {
2400 if (netif_device_present(dev))
2401 err = dev->do_ioctl(dev, ifr,
2402 cmd);
2403 else
2404 err = -ENODEV;
2405 }
2406 } else
2407 err = -EINVAL;
2408
2409 }
2410 return err;
2411}
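
/*
 * User-space view of the calls above (illustrative sketch; "eth0" is
 * a placeholder name): each request fills a struct ifreq named after
 * the interface and issues the ioctl on any AF_INET socket, e.g. to
 * read the MTU:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */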
2412
2413/*
2414 * This function handles all "interface"-type I/O control requests. The actual
2415 * 'doing' part of this is dev_ifsioc above.
2416 */
2417
2418/**
2419 * dev_ioctl - network device ioctl
2420 * @cmd: command to issue
2421 * @arg: pointer to a struct ifreq in user space
2422 *
2423 * Issue ioctl functions to devices. This is normally called by the
2424 * user space syscall interfaces but can sometimes be useful for
2425 * other purposes. The return value is the return from the syscall if
2426 * positive or a negative errno code on error.
2427 */
2428
2429int dev_ioctl(unsigned int cmd, void __user *arg)
2430{
2431 struct ifreq ifr;
2432 int ret;
2433 char *colon;
2434
2435 /* One special case: SIOCGIFCONF takes an ifconf argument
2436 and requires a shared lock, because it sleeps while writing
2437 to user space.
2438 */
2439
2440 if (cmd == SIOCGIFCONF) {
2441 rtnl_shlock();
2442 ret = dev_ifconf((char __user *) arg);
2443 rtnl_shunlock();
2444 return ret;
2445 }
2446 if (cmd == SIOCGIFNAME)
2447 return dev_ifname((struct ifreq __user *)arg);
2448
2449 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2450 return -EFAULT;
2451
2452 ifr.ifr_name[IFNAMSIZ-1] = 0;
2453
2454 colon = strchr(ifr.ifr_name, ':');
2455 if (colon)
2456 *colon = 0;
2457
2458 /*
2459 * See which interface the caller is talking about.
2460 */
2461
2462 switch (cmd) {
2463 /*
2464 * These ioctl calls:
2465 * - can be done by all.
2466 * - atomic and do not require locking.
2467 * - return a value
2468 */
2469 case SIOCGIFFLAGS:
2470 case SIOCGIFMETRIC:
2471 case SIOCGIFMTU:
2472 case SIOCGIFHWADDR:
2473 case SIOCGIFSLAVE:
2474 case SIOCGIFMAP:
2475 case SIOCGIFINDEX:
2476 case SIOCGIFTXQLEN:
2477 dev_load(ifr.ifr_name);
2478 read_lock(&dev_base_lock);
2479 ret = dev_ifsioc(&ifr, cmd);
2480 read_unlock(&dev_base_lock);
2481 if (!ret) {
2482 if (colon)
2483 *colon = ':';
2484 if (copy_to_user(arg, &ifr,
2485 sizeof(struct ifreq)))
2486 ret = -EFAULT;
2487 }
2488 return ret;
2489
2490 case SIOCETHTOOL:
2491 dev_load(ifr.ifr_name);
2492 rtnl_lock();
2493 ret = dev_ethtool(&ifr);
2494 rtnl_unlock();
2495 if (!ret) {
2496 if (colon)
2497 *colon = ':';
2498 if (copy_to_user(arg, &ifr,
2499 sizeof(struct ifreq)))
2500 ret = -EFAULT;
2501 }
2502 return ret;
2503
2504 /*
2505 * These ioctl calls:
2506 * - require superuser power.
2507 * - require strict serialization.
2508 * - return a value
2509 */
2510 case SIOCGMIIPHY:
2511 case SIOCGMIIREG:
2512 case SIOCSIFNAME:
2513 if (!capable(CAP_NET_ADMIN))
2514 return -EPERM;
2515 dev_load(ifr.ifr_name);
2516 rtnl_lock();
2517 ret = dev_ifsioc(&ifr, cmd);
2518 rtnl_unlock();
2519 if (!ret) {
2520 if (colon)
2521 *colon = ':';
2522 if (copy_to_user(arg, &ifr,
2523 sizeof(struct ifreq)))
2524 ret = -EFAULT;
2525 }
2526 return ret;
2527
2528 /*
2529 * These ioctl calls:
2530 * - require superuser power.
2531 * - require strict serialization.
2532 * - do not return a value
2533 */
2534 case SIOCSIFFLAGS:
2535 case SIOCSIFMETRIC:
2536 case SIOCSIFMTU:
2537 case SIOCSIFMAP:
2538 case SIOCSIFHWADDR:
2539 case SIOCSIFSLAVE:
2540 case SIOCADDMULTI:
2541 case SIOCDELMULTI:
2542 case SIOCSIFHWBROADCAST:
2543 case SIOCSIFTXQLEN:
2544 case SIOCSMIIREG:
2545 case SIOCBONDENSLAVE:
2546 case SIOCBONDRELEASE:
2547 case SIOCBONDSETHWADDR:
2548 case SIOCBONDSLAVEINFOQUERY:
2549 case SIOCBONDINFOQUERY:
2550 case SIOCBONDCHANGEACTIVE:
2551 case SIOCBRADDIF:
2552 case SIOCBRDELIF:
2553 if (!capable(CAP_NET_ADMIN))
2554 return -EPERM;
2555 dev_load(ifr.ifr_name);
2556 rtnl_lock();
2557 ret = dev_ifsioc(&ifr, cmd);
2558 rtnl_unlock();
2559 return ret;
2560
2561 case SIOCGIFMEM:
2562 /* Get the per device memory space. We can add this but
2563 * currently do not support it */
2564 case SIOCSIFMEM:
2565 /* Set the per device memory buffer space.
2566 * Not applicable in our case */
2567 case SIOCSIFLINK:
2568 return -EINVAL;
2569
2570 /*
2571 * Unknown or private ioctl.
2572 */
2573 default:
2574 if (cmd == SIOCWANDEV ||
2575 (cmd >= SIOCDEVPRIVATE &&
2576 cmd <= SIOCDEVPRIVATE + 15)) {
2577 dev_load(ifr.ifr_name);
2578 rtnl_lock();
2579 ret = dev_ifsioc(&ifr, cmd);
2580 rtnl_unlock();
2581 if (!ret && copy_to_user(arg, &ifr,
2582 sizeof(struct ifreq)))
2583 ret = -EFAULT;
2584 return ret;
2585 }
2586#ifdef WIRELESS_EXT
2587 /* Take care of Wireless Extensions */
2588 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2589 /* If command is `set a parameter', or
2590 * `get the encoding parameters', check if
2591 * the user has the right to do it */
2592 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2593 if (!capable(CAP_NET_ADMIN))
2594 return -EPERM;
2595 }
2596 dev_load(ifr.ifr_name);
2597 rtnl_lock();
2598 /* Follow me in net/core/wireless.c */
2599 ret = wireless_process_ioctl(&ifr, cmd);
2600 rtnl_unlock();
2601 if (IW_IS_GET(cmd) &&
2602 copy_to_user(arg, &ifr,
2603 sizeof(struct ifreq)))
2604 ret = -EFAULT;
2605 return ret;
2606 }
2607#endif /* WIRELESS_EXT */
2608 return -EINVAL;
2609 }
2610}
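
/*
 * The SIOCGIFCONF special case above, seen from user space
 * (illustrative sketch): the caller supplies a buffer and gets back
 * one struct ifreq per configured interface.
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc;
 *	int i, n;
 *
 *	ifc.ifc_len = sizeof(reqs);
 *	ifc.ifc_req = reqs;
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *		for (i = 0; i < n; i++)
 *			printf("%s\n", reqs[i].ifr_name);
 *	}
 */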
2611
2612
2613/**
2614 * dev_new_index - allocate an ifindex
2615 *
2616 * Returns a suitable unique value for a new device interface
2617 * number. The caller must hold the rtnl semaphore or the
2618 * dev_base_lock to be sure it remains unique.
2619 */
2620static int dev_new_index(void)
2621{
2622 static int ifindex;
2623 for (;;) {
2624 if (++ifindex <= 0)
2625 ifindex = 1;
2626 if (!__dev_get_by_index(ifindex))
2627 return ifindex;
2628 }
2629}
2630
2631static int dev_boot_phase = 1;
2632
2633/* Delayed registration/unregistration */
2634static DEFINE_SPINLOCK(net_todo_list_lock);
2635static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2636
2637static inline void net_set_todo(struct net_device *dev)
2638{
2639 spin_lock(&net_todo_list_lock);
2640 list_add_tail(&dev->todo_list, &net_todo_list);
2641 spin_unlock(&net_todo_list_lock);
2642}
2643
2644/**
2645 * register_netdevice - register a network device
2646 * @dev: device to register
2647 *
2648 * Take a completed network device structure and add it to the kernel
2649 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2650 * chain. 0 is returned on success. A negative errno code is returned
2651 * on a failure to set up the device, or if the name is a duplicate.
2652 *
2653 * Callers must hold the rtnl semaphore. You may want
2654 * register_netdev() instead of this.
2655 *
2656 * BUGS:
2657 * The locking appears insufficient to guarantee two parallel registers
2658 * will not get the same name.
2659 */
2660
2661int register_netdevice(struct net_device *dev)
2662{
2663 struct hlist_head *head;
2664 struct hlist_node *p;
2665 int ret;
2666
2667 BUG_ON(dev_boot_phase);
2668 ASSERT_RTNL();
2669
2670 /* When net_devices are persistent, this will be fatal. */
2671 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2672
2673 spin_lock_init(&dev->queue_lock);
2674 spin_lock_init(&dev->xmit_lock);
2675 dev->xmit_lock_owner = -1;
2676#ifdef CONFIG_NET_CLS_ACT
2677 spin_lock_init(&dev->ingress_lock);
2678#endif
2679
2680 ret = alloc_divert_blk(dev);
2681 if (ret)
2682 goto out;
2683
2684 dev->iflink = -1;
2685
2686 /* Init, if this function is available */
2687 if (dev->init) {
2688 ret = dev->init(dev);
2689 if (ret) {
2690 if (ret > 0)
2691 ret = -EIO;
2692 goto out_err;
2693 }
2694 }
2695
2696 if (!dev_valid_name(dev->name)) {
2697 ret = -EINVAL;
2698 goto out_err;
2699 }
2700
2701 dev->ifindex = dev_new_index();
2702 if (dev->iflink == -1)
2703 dev->iflink = dev->ifindex;
2704
2705 /* Check for existence of name */
2706 head = dev_name_hash(dev->name);
2707 hlist_for_each(p, head) {
2708 struct net_device *d
2709 = hlist_entry(p, struct net_device, name_hlist);
2710 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2711 ret = -EEXIST;
2712 goto out_err;
2713 }
2714 }
2715
2716 /* Fix illegal SG+CSUM combinations. */
2717 if ((dev->features & NETIF_F_SG) &&
2718 !(dev->features & (NETIF_F_IP_CSUM |
2719 NETIF_F_NO_CSUM |
2720 NETIF_F_HW_CSUM))) {
2721 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2722 dev->name);
2723 dev->features &= ~NETIF_F_SG;
2724 }
2725
2726 /* TSO requires that SG is present as well. */
2727 if ((dev->features & NETIF_F_TSO) &&
2728 !(dev->features & NETIF_F_SG)) {
2729 printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
2730 dev->name);
2731 dev->features &= ~NETIF_F_TSO;
2732 }
2733 if (dev->features & NETIF_F_UFO) {
2734 if (!(dev->features & NETIF_F_HW_CSUM)) {
2735 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2736 "NETIF_F_HW_CSUM feature.\n",
2737 dev->name);
2738 dev->features &= ~NETIF_F_UFO;
2739 }
2740 if (!(dev->features & NETIF_F_SG)) {
2741 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2742 "NETIF_F_SG feature.\n",
2743 dev->name);
2744 dev->features &= ~NETIF_F_UFO;
2745 }
2746 }
2747
2748 /*
2749 * Install a nil rebuild_header routine that should never be
2750 * called; it serves purely as a bug trap.
2751 */
2752
2753 if (!dev->rebuild_header)
2754 dev->rebuild_header = default_rebuild_header;
2755
2756 /*
2757 * The default initial state at registration is that the
2758 * device is present.
2759 */
2760
2761 set_bit(__LINK_STATE_PRESENT, &dev->state);
2762
2763 dev->next = NULL;
2764 dev_init_scheduler(dev);
2765 write_lock_bh(&dev_base_lock);
2766 *dev_tail = dev;
2767 dev_tail = &dev->next;
2768 hlist_add_head(&dev->name_hlist, head);
2769 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2770 dev_hold(dev);
2771 dev->reg_state = NETREG_REGISTERING;
2772 write_unlock_bh(&dev_base_lock);
2773
2774 /* Notify protocols, that a new device appeared. */
2775 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2776
2777 /* Finish registration after unlock */
2778 net_set_todo(dev);
2779 ret = 0;
2780
2781out:
2782 return ret;
2783out_err:
2784 free_divert_blk(dev);
2785 goto out;
2786}
2787
2788/**
2789 * register_netdev - register a network device
2790 * @dev: device to register
2791 *
2792 * Take a completed network device structure and add it to the kernel
2793 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2794 * chain. 0 is returned on success. A negative errno code is returned
2795 * on a failure to set up the device, or if the name is a duplicate.
2796 *
2797 * This is a wrapper around register_netdevice() that takes the rtnl semaphore
2798 * and expands the device name if you passed a format string to
2799 * alloc_netdev.
2800 */
2801int register_netdev(struct net_device *dev)
2802{
2803 int err;
2804
2805 rtnl_lock();
2806
2807 /*
2808 * If the name is a format string the caller wants us to do a
2809 * name allocation.
2810 */
2811 if (strchr(dev->name, '%')) {
2812 err = dev_alloc_name(dev, dev->name);
2813 if (err < 0)
2814 goto out;
2815 }
2816
2817 /*
2818 * Backwards-compatibility hook. Kill this one in 2.5
2819 */
2820 if (dev->name[0] == 0 || dev->name[0] == ' ') {
2821 err = dev_alloc_name(dev, "eth%d");
2822 if (err < 0)
2823 goto out;
2824 }
2825
2826 err = register_netdevice(dev);
2827out:
2828 rtnl_unlock();
2829 return err;
2830}
2831EXPORT_SYMBOL(register_netdev);
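
/*
 * The usual driver lifecycle around the two registration entry
 * points (an illustrative sketch; my_priv, my_setup and "myeth%d"
 * are hypothetical):
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	takes RTNL, expands "myeth%d"
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */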
2832
2833/*
2834 * netdev_wait_allrefs - wait until all references are gone.
2835 *
2836 * This is called when unregistering network devices.
2837 *
2838 * Any protocol or device that holds a reference should register
2839 * for netdevice notification, and cleanup and put back the
2840 * reference if they receive an UNREGISTER event.
2841 * We can get stuck here if buggy protocols don't correctly
2842 * call dev_put.
2843 */
2844static void netdev_wait_allrefs(struct net_device *dev)
2845{
2846 unsigned long rebroadcast_time, warning_time;
2847
2848 rebroadcast_time = warning_time = jiffies;
2849 while (atomic_read(&dev->refcnt) != 0) {
2850 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2851 rtnl_shlock();
2852
2853 /* Rebroadcast unregister notification */
2854 notifier_call_chain(&netdev_chain,
2855 NETDEV_UNREGISTER, dev);
2856
2857 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2858 &dev->state)) {
2859 /* We must not have linkwatch events
2860 * pending on unregister. If this
2861 * happens, we simply run the queue
2862 * unscheduled, resulting in a noop
2863 * for this device.
2864 */
2865 linkwatch_run_queue();
2866 }
2867
2868 rtnl_shunlock();
2869
2870 rebroadcast_time = jiffies;
2871 }
2872
2873 msleep(250);
2874
2875 if (time_after(jiffies, warning_time + 10 * HZ)) {
2876 printk(KERN_EMERG "unregister_netdevice: "
2877 "waiting for %s to become free. Usage "
2878 "count = %d\n",
2879 dev->name, atomic_read(&dev->refcnt));
2880 warning_time = jiffies;
2881 }
2882 }
2883}
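
/*
 * Sketch of a well-behaved reference holder (illustrative only;
 * cached_dev is hypothetical): it registers a netdevice notifier
 * and drops its reference on NETDEV_UNREGISTER, allowing the wait
 * above to finish.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == cached_dev) {
 *			cached_dev = NULL;
 *			dev_put(dev);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */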
2884
2885/* The sequence is:
2886 *
2887 * rtnl_lock();
2888 * ...
2889 * register_netdevice(x1);
2890 * register_netdevice(x2);
2891 * ...
2892 * unregister_netdevice(y1);
2893 * unregister_netdevice(y2);
2894 * ...
2895 * rtnl_unlock();
2896 * free_netdev(y1);
2897 * free_netdev(y2);
2898 *
2899 * We are invoked by rtnl_unlock() after it drops the semaphore.
2900 * This allows us to deal with problems:
2901 * 1) We can create/delete sysfs objects which invoke hotplug
2902 * without deadlocking with linkwatch via keventd.
2903 * 2) Since we run with the RTNL semaphore not held, we can sleep
2904 * safely in order to wait for the netdev refcnt to drop to zero.
2905 */
2906static DECLARE_MUTEX(net_todo_run_mutex);
2907void netdev_run_todo(void)
2908{
2909 struct list_head list = LIST_HEAD_INIT(list);
2910 int err;
2911
2912
2913 /* Need to guard against multiple CPUs getting out of order. */
2914 down(&net_todo_run_mutex);
2915
2916 /* Not safe to do outside the semaphore. We must not return
2917 * until all unregister events invoked by the local processor
2918 * have been completed (either by this todo run, or one on
2919 * another cpu).
2920 */
2921 if (list_empty(&net_todo_list))
2922 goto out;
2923
2924 /* Snapshot list, allow later requests */
2925 spin_lock(&net_todo_list_lock);
2926 list_splice_init(&net_todo_list, &list);
2927 spin_unlock(&net_todo_list_lock);
2928
2929 while (!list_empty(&list)) {
2930 struct net_device *dev
2931 = list_entry(list.next, struct net_device, todo_list);
2932 list_del(&dev->todo_list);
2933
2934 switch(dev->reg_state) {
2935 case NETREG_REGISTERING:
2936 err = netdev_register_sysfs(dev);
2937 if (err)
2938 printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
2939 dev->name, err);
2940 dev->reg_state = NETREG_REGISTERED;
2941 break;
2942
2943 case NETREG_UNREGISTERING:
2944 netdev_unregister_sysfs(dev);
2945 dev->reg_state = NETREG_UNREGISTERED;
2946
2947 netdev_wait_allrefs(dev);
2948
2949 /* paranoia */
2950 BUG_ON(atomic_read(&dev->refcnt));
2951 BUG_TRAP(!dev->ip_ptr);
2952 BUG_TRAP(!dev->ip6_ptr);
2953 BUG_TRAP(!dev->dn_ptr);
2954
2955
2956 /* It must be the very last action,
2957 * after this 'dev' may point to freed up memory.
2958 */
2959 if (dev->destructor)
2960 dev->destructor(dev);
2961 break;
2962
2963 default:
2964 printk(KERN_ERR "network todo '%s' but state %d\n",
2965 dev->name, dev->reg_state);
2966 break;
2967 }
2968 }
2969
2970out:
2971 up(&net_todo_run_mutex);
2972}
2973
2974/**
2975 * alloc_netdev - allocate network device
2976 * @sizeof_priv: size of private data to allocate space for
2977 * @name: device name format string
2978 * @setup: callback to initialize device
2979 *
2980 * Allocates a struct net_device with private data area for driver use
2981 * and performs basic initialization.
2982 */
2983struct net_device *alloc_netdev(int sizeof_priv, const char *name,
2984 void (*setup)(struct net_device *))
2985{
2986 void *p;
2987 struct net_device *dev;
2988 int alloc_size;
2989
2990 /* ensure 32-byte alignment of both the device and private area */
2991 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
2992 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
2993
2994 p = kmalloc(alloc_size, GFP_KERNEL);
2995 if (!p) {
2996 printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
2997 return NULL;
2998 }
2999 memset(p, 0, alloc_size);
3000
3001 dev = (struct net_device *)
3002 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3003 dev->padded = (char *)dev - (char *)p;
3004
3005 if (sizeof_priv)
3006 dev->priv = netdev_priv(dev);
3007
3008 setup(dev);
3009 strcpy(dev->name, name);
3010 return dev;
3011}
3012EXPORT_SYMBOL(alloc_netdev);
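
/*
 * Worked example of the alignment arithmetic above, assuming
 * NETDEV_ALIGN_CONST == 31 (i.e. 32-byte alignment): sizeof(*dev) is
 * first rounded up to a multiple of 32 so the private area starts
 * aligned, and one extra NETDEV_ALIGN_CONST is allocated so that if
 * kmalloc() returns, say, a pointer ending in 0x10, dev can be placed
 * at the next 32-byte boundary (ending in 0x20). dev->padded records
 * that offset so free_netdev() can hand the original pointer back to
 * kfree().
 */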
3013
3014/**
3015 * free_netdev - free network device
3016 * @dev: device
3017 *
3018 * This function does the last stage of destroying an allocated device
3019 * interface. The reference to the device object is released.
3020 * If this is the last reference then it will be freed.
3021 */
3022void free_netdev(struct net_device *dev)
3023{
3024#ifdef CONFIG_SYSFS
3025 /* Compatibility with error handling in drivers */
3026 if (dev->reg_state == NETREG_UNINITIALIZED) {
3027 kfree((char *)dev - dev->padded);
3028 return;
3029 }
3030
3031 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3032 dev->reg_state = NETREG_RELEASED;
3033
3034 /* will free via class release */
3035 class_device_put(&dev->class_dev);
3036#else
3037 kfree((char *)dev - dev->padded);
3038#endif
3039}
3040
3041/* Synchronize with packet receive processing. */
3042void synchronize_net(void)
3043{
3044 might_sleep();
3045 synchronize_rcu();
3046}
3047
3048/**
3049 * unregister_netdevice - remove device from the kernel
3050 * @dev: device
3051 *
3052 * This function shuts down a device interface and removes it
3053 * from the kernel tables. On success 0 is returned, on a failure
3054 * a negative errno code is returned.
3055 *
3056 * Callers must hold the rtnl semaphore. You may want
3057 * unregister_netdev() instead of this.
3058 */
3059
3060int unregister_netdevice(struct net_device *dev)
3061{
3062 struct net_device *d, **dp;
3063
3064 BUG_ON(dev_boot_phase);
3065 ASSERT_RTNL();
3066
3067 /* Some devices call this without registering, for initialization unwind. */
3068 if (dev->reg_state == NETREG_UNINITIALIZED) {
3069 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3070 "was registered\n", dev->name, dev);
3071 return -ENODEV;
3072 }
3073
3074 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3075
3076 /* If device is running, close it first. */
3077 if (dev->flags & IFF_UP)
3078 dev_close(dev);
3079
3080 /* And unlink it from device chain. */
3081 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3082 if (d == dev) {
3083 write_lock_bh(&dev_base_lock);
3084 hlist_del(&dev->name_hlist);
3085 hlist_del(&dev->index_hlist);
3086 if (dev_tail == &dev->next)
3087 dev_tail = dp;
3088 *dp = d->next;
3089 write_unlock_bh(&dev_base_lock);
3090 break;
3091 }
3092 }
3093 if (!d) {
3094 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3095 dev->name);
3096 return -ENODEV;
3097 }
3098
3099 dev->reg_state = NETREG_UNREGISTERING;
3100
3101 synchronize_net();
3102
3103 /* Shutdown queueing discipline. */
3104 dev_shutdown(dev);
3105
3106
3107 /* Notify protocols, that we are about to destroy
3108 this device. They should clean all the things.
3109 */
3110 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3111
3112 /*
3113 * Flush the multicast chain
3114 */
3115 dev_mc_discard(dev);
3116
3117 if (dev->uninit)
3118 dev->uninit(dev);
3119
3120 /* Notifier chain MUST detach us from master device. */
3121 BUG_TRAP(!dev->master);
3122
3123 free_divert_blk(dev);
3124
3125 /* Finish processing unregister after unlock */
3126 net_set_todo(dev);
3127
3128 synchronize_net();
3129
3130 dev_put(dev);
3131 return 0;
3132}
3133
3134/**
3135 * unregister_netdev - remove device from the kernel
3136 * @dev: device
3137 *
3138 * This function shuts down a device interface and removes it
3139 * from the kernel tables. On success 0 is returned, on a failure
3140 * a negative errno code is returned.
3141 *
3142 * This is just a wrapper for unregister_netdevice that takes
3143 * the rtnl semaphore. In general you want to use this and not
3144 * unregister_netdevice.
3145 */
3146void unregister_netdev(struct net_device *dev)
3147{
3148 rtnl_lock();
3149 unregister_netdevice(dev);
3150 rtnl_unlock();
3151}
3152
3153EXPORT_SYMBOL(unregister_netdev);
3154
3155#ifdef CONFIG_HOTPLUG_CPU
3156static int dev_cpu_callback(struct notifier_block *nfb,
3157 unsigned long action,
3158 void *ocpu)
3159{
3160 struct sk_buff **list_skb;
3161 struct net_device **list_net;
3162 struct sk_buff *skb;
3163 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3164 struct softnet_data *sd, *oldsd;
3165
3166 if (action != CPU_DEAD)
3167 return NOTIFY_OK;
3168
3169 local_irq_disable();
3170 cpu = smp_processor_id();
3171 sd = &per_cpu(softnet_data, cpu);
3172 oldsd = &per_cpu(softnet_data, oldcpu);
3173
3174 /* Find end of our completion_queue. */
3175 list_skb = &sd->completion_queue;
3176 while (*list_skb)
3177 list_skb = &(*list_skb)->next;
3178 /* Append completion queue from offline CPU. */
3179 *list_skb = oldsd->completion_queue;
3180 oldsd->completion_queue = NULL;
3181
3182 /* Find end of our output_queue. */
3183 list_net = &sd->output_queue;
3184 while (*list_net)
3185 list_net = &(*list_net)->next_sched;
3186 /* Append output queue from offline CPU. */
3187 *list_net = oldsd->output_queue;
3188 oldsd->output_queue = NULL;
3189
3190 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3191 local_irq_enable();
3192
3193 /* Process offline CPU's input_pkt_queue */
3194 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3195 netif_rx(skb);
3196
3197 return NOTIFY_OK;
3198}
3199#endif /* CONFIG_HOTPLUG_CPU */
3200
3201
3202/*
3203 * Initialize the DEV module. At boot time this walks the device list and
3204 * unhooks any devices that fail to initialise (normally hardware not
3205 * present) and leaves us with a valid list of present and active devices.
3206 *
3207 */
3208
3209/*
3210 * This is called single threaded during boot, so no need
3211 * to take the rtnl semaphore.
3212 */
3213static int __init net_dev_init(void)
3214{
3215 int i, rc = -ENOMEM;
3216
3217 BUG_ON(!dev_boot_phase);
3218
3219 net_random_init();
3220
3221 if (dev_proc_init())
3222 goto out;
3223
3224 if (netdev_sysfs_init())
3225 goto out;
3226
3227 INIT_LIST_HEAD(&ptype_all);
3228 for (i = 0; i < 16; i++)
3229 INIT_LIST_HEAD(&ptype_base[i]);
3230
3231 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3232 INIT_HLIST_HEAD(&dev_name_head[i]);
3233
3234 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3235 INIT_HLIST_HEAD(&dev_index_head[i]);
3236
3237 /*
3238 * Initialise the packet receive queues.
3239 */
3240
3241 for (i = 0; i < NR_CPUS; i++) {
3242 struct softnet_data *queue;
3243
3244 queue = &per_cpu(softnet_data, i);
3245 skb_queue_head_init(&queue->input_pkt_queue);
3246 queue->completion_queue = NULL;
3247 INIT_LIST_HEAD(&queue->poll_list);
3248 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3249 queue->backlog_dev.weight = weight_p;
3250 queue->backlog_dev.poll = process_backlog;
3251 atomic_set(&queue->backlog_dev.refcnt, 1);
3252 }
3253
3254 dev_boot_phase = 0;
3255
3256 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3257 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3258
3259 hotcpu_notifier(dev_cpu_callback, 0);
3260 dst_init();
3261 dev_mcast_init();
3262 rc = 0;
3263out:
3264 return rc;
3265}
3266
3267subsys_initcall(net_dev_init);
3268
3269EXPORT_SYMBOL(__dev_get_by_index);
3270EXPORT_SYMBOL(__dev_get_by_name);
3271EXPORT_SYMBOL(__dev_remove_pack);
3272EXPORT_SYMBOL(__skb_linearize);
3273 EXPORT_SYMBOL(dev_valid_name);
3274EXPORT_SYMBOL(dev_add_pack);
3275EXPORT_SYMBOL(dev_alloc_name);
3276EXPORT_SYMBOL(dev_close);
3277EXPORT_SYMBOL(dev_get_by_flags);
3278EXPORT_SYMBOL(dev_get_by_index);
3279EXPORT_SYMBOL(dev_get_by_name);
3280EXPORT_SYMBOL(dev_open);
3281EXPORT_SYMBOL(dev_queue_xmit);
3282EXPORT_SYMBOL(dev_remove_pack);
3283EXPORT_SYMBOL(dev_set_allmulti);
3284EXPORT_SYMBOL(dev_set_promiscuity);
3285EXPORT_SYMBOL(dev_change_flags);
3286EXPORT_SYMBOL(dev_set_mtu);
3287EXPORT_SYMBOL(dev_set_mac_address);
3288EXPORT_SYMBOL(free_netdev);
3289EXPORT_SYMBOL(netdev_boot_setup_check);
3290EXPORT_SYMBOL(netdev_set_master);
3291EXPORT_SYMBOL(netdev_state_change);
3292EXPORT_SYMBOL(netif_receive_skb);
3293EXPORT_SYMBOL(netif_rx);
3294EXPORT_SYMBOL(register_gifconf);
3295EXPORT_SYMBOL(register_netdevice);
3296EXPORT_SYMBOL(register_netdevice_notifier);
3297EXPORT_SYMBOL(skb_checksum_help);
3298EXPORT_SYMBOL(synchronize_net);
3299EXPORT_SYMBOL(unregister_netdevice);
3300EXPORT_SYMBOL(unregister_netdevice_notifier);
3301EXPORT_SYMBOL(net_enable_timestamp);
3302EXPORT_SYMBOL(net_disable_timestamp);
3303EXPORT_SYMBOL(dev_get_flags);
3304
3305#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3306EXPORT_SYMBOL(br_handle_frame_hook);
3307EXPORT_SYMBOL(br_fdb_get_hook);
3308EXPORT_SYMBOL(br_fdb_put_hook);
3309#endif
3310
3311#ifdef CONFIG_KMOD
3312EXPORT_SYMBOL(dev_load);
3313#endif
3314
3315EXPORT_PER_CPU_SYMBOL(softnet_data);