/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
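
/*
 * A minimal sketch of the pure-reader pattern described above (the
 * printk body is illustrative only):
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		printk(KERN_DEBUG "found %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 *
 * A device pointer is only guaranteed valid while the lock is held;
 * a reader that needs it afterwards must take a reference (dev_hold).
 */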
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
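
/*
 * Sketch of the intended usage (a hypothetical module; my_rcv and
 * my_packet_type are made-up names). A handler on ptype_base receives
 * a clone and must free it when done:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_packet_type = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);		(module init)
 *	dev_remove_pack(&my_packet_type);	(module exit)
 */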
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
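
/*
 * For example (the values are illustrative only), booting with
 *
 *	netdev=5,0x240,0,0,eth0
 *
 * records irq 5 and I/O base 0x240 for the device that will later be
 * probed as "eth0"; netdev_boot_setup_check() then copies the saved
 * map into the matching net_device.
 */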
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
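
/*
 * A minimal sketch of the refcounted lookup ("eth0" is only an
 * example name):
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */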
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
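
/*
 * Sketch: a driver asking for the next free "eth%d" slot before
 * registration (error handling elided):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto failed;
 */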

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
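
/*
 * Typical (illustrative) use from an ioctl path, before looking the
 * device up for real:
 *
 *	dev_load(net, ifr.ifr_name);
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, ifr.ifr_name);
 *	...
 */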
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}

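/*
 * Both of the above must run under RTNL; a minimal sketch:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
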
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

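/*
 * Sketch of a subscriber (my_netdev_event and my_notifier are
 * hypothetical names); the notifier receives the net_device as the
 * opaque pointer:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */
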
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		txq->next_sched = sd->output_queue;
		sd->output_queue = txq;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

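/*
 * Sketch: a driver's TX-completion handler may run in hard interrupt
 * context, so it frees with the _any/_irq variants rather than
 * dev_kfree_skb() (my_tx_done is an illustrative name):
 *
 *	static irqreturn_t my_tx_done(int irq, void *data)
 *	{
 *		...
 *		dev_kfree_skb_any(skb);
 *		return IRQ_HANDLED;
 *	}
 */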

/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

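/*
 * Sketch of the intended pairing in a driver's power-management hooks
 * (my_suspend/my_resume are illustrative names):
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		netif_device_detach(netdev);
 *		...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		...
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */
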
6de329e2 BH |
1398 | static bool can_checksum_protocol(unsigned long features, __be16 protocol) |
1399 | { | |
1400 | return ((features & NETIF_F_GEN_CSUM) || | |
1401 | ((features & NETIF_F_IP_CSUM) && | |
1402 | protocol == htons(ETH_P_IP)) || | |
1403 | ((features & NETIF_F_IPV6_CSUM) && | |
1404 | protocol == htons(ETH_P_IPV6))); | |
1405 | } | |
1406 | ||
1407 | static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) | |
1408 | { | |
1409 | if (can_checksum_protocol(dev->features, skb->protocol)) | |
1410 | return true; | |
1411 | ||
1412 | if (skb->protocol == htons(ETH_P_8021Q)) { | |
1413 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | |
1414 | if (can_checksum_protocol(dev->features & dev->vlan_features, | |
1415 | veh->h_vlan_encapsulated_proto)) | |
1416 | return true; | |
1417 | } | |
1418 | ||
1419 | return false; | |
1420 | } | |
56079431 | 1421 | |
1da177e4 LT |
1422 | /* |
1423 | * Invalidate hardware checksum when packet is to be mangled, and | |
1424 | * complete checksum manually on outgoing path. | |
1425 | */ | |
84fa7933 | 1426 | int skb_checksum_help(struct sk_buff *skb) |
1da177e4 | 1427 | { |
d3bc23e7 | 1428 | __wsum csum; |
663ead3b | 1429 | int ret = 0, offset; |
1da177e4 | 1430 | |
84fa7933 | 1431 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
a430a43d HX |
1432 | goto out_set_summed; |
1433 | ||
1434 | if (unlikely(skb_shinfo(skb)->gso_size)) { | |
a430a43d HX |
1435 | /* Let GSO fix up the checksum. */ |
1436 | goto out_set_summed; | |
1da177e4 LT |
1437 | } |
1438 | ||
a030847e HX |
1439 | offset = skb->csum_start - skb_headroom(skb); |
1440 | BUG_ON(offset >= skb_headlen(skb)); | |
1441 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | |
1442 | ||
1443 | offset += skb->csum_offset; | |
1444 | BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); | |
1445 | ||
1446 | if (skb_cloned(skb) && | |
1447 | !skb_clone_writable(skb, offset + sizeof(__sum16))) { | |
1da177e4 LT |
1448 | ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1449 | if (ret) | |
1450 | goto out; | |
1451 | } | |
1452 | ||
a030847e | 1453 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); |
a430a43d | 1454 | out_set_summed: |
1da177e4 | 1455 | skb->ip_summed = CHECKSUM_NONE; |
4ec93edb | 1456 | out: |
1da177e4 LT |
1457 | return ret; |
1458 | } | |
1459 | ||
f6a78bfc HX |
1460 | /** |
1461 | * skb_gso_segment - Perform segmentation on skb. | |
1462 | * @skb: buffer to segment | |
576a30eb | 1463 | * @features: features for the output path (see dev->features) |
f6a78bfc HX |
1464 | * |
1465 | * This function segments the given skb and returns a list of segments. | |
576a30eb HX |
1466 | * |
1467 | * It may return NULL if the skb requires no segmentation. This is | |
1468 | * only possible when GSO is used for verifying header integrity. | |
f6a78bfc | 1469 | */ |
576a30eb | 1470 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) |
f6a78bfc HX |
1471 | { |
1472 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | |
1473 | struct packet_type *ptype; | |
252e3346 | 1474 | __be16 type = skb->protocol; |
a430a43d | 1475 | int err; |
f6a78bfc HX |
1476 | |
1477 | BUG_ON(skb_shinfo(skb)->frag_list); | |
f6a78bfc | 1478 | |
459a98ed | 1479 | skb_reset_mac_header(skb); |
b0e380b1 | 1480 | skb->mac_len = skb->network_header - skb->mac_header; |
f6a78bfc HX |
1481 | __skb_pull(skb, skb->mac_len); |
1482 | ||
f9d106a6 | 1483 | if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) { |
a430a43d HX |
1484 | if (skb_header_cloned(skb) && |
1485 | (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) | |
1486 | return ERR_PTR(err); | |
1487 | } | |
1488 | ||
f6a78bfc | 1489 | rcu_read_lock(); |
82d8a867 PE |
1490 | list_for_each_entry_rcu(ptype, |
1491 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | |
f6a78bfc | 1492 | if (ptype->type == type && !ptype->dev && ptype->gso_segment) { |
84fa7933 | 1493 | if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { |
a430a43d HX |
1494 | err = ptype->gso_send_check(skb); |
1495 | segs = ERR_PTR(err); | |
1496 | if (err || skb_gso_ok(skb, features)) | |
1497 | break; | |
d56f90a7 ACM |
1498 | __skb_push(skb, (skb->data - |
1499 | skb_network_header(skb))); | |
a430a43d | 1500 | } |
576a30eb | 1501 | segs = ptype->gso_segment(skb, features); |
f6a78bfc HX |
1502 | break; |
1503 | } | |
1504 | } | |
1505 | rcu_read_unlock(); | |
1506 | ||
98e399f8 | 1507 | __skb_push(skb, skb->data - skb_mac_header(skb)); |
576a30eb | 1508 | |
f6a78bfc HX |
1509 | return segs; |
1510 | } | |
1511 | ||
1512 | EXPORT_SYMBOL(skb_gso_segment); | |
1513 | ||
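/*
 * Sketch of a consumer, modelled on the gso loop in dev_hard_start_xmit()
 * below: segments come back linked through skb->next, NULL means no
 * segmentation was needed, and errors arrive as ERR_PTR values.
 */
#if 0
struct sk_buff *segs, *nskb;

segs = skb_gso_segment(skb, dev->features);
if (IS_ERR(segs))
	return PTR_ERR(segs);
while (segs) {
	nskb = segs;
	segs = segs->next;
	nskb->next = NULL;
	/* ... transmit nskb ... */
}
#endif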
fb286bb2 HX |
1514 | /* Take action when hardware reception checksum errors are detected. */ |
1515 | #ifdef CONFIG_BUG | |
1516 | void netdev_rx_csum_fault(struct net_device *dev) | |
1517 | { | |
1518 | if (net_ratelimit()) { | |
4ec93edb | 1519 | printk(KERN_ERR "%s: hw csum failure.\n", |
246a4212 | 1520 | dev ? dev->name : "<unknown>"); |
fb286bb2 HX |
1521 | dump_stack(); |
1522 | } | |
1523 | } | |
1524 | EXPORT_SYMBOL(netdev_rx_csum_fault); | |
1525 | #endif | |
1526 | ||
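/*
 * Sketch of the typical caller, modelled on __skb_checksum_complete()
 * in net/core/datagram.c: validation against the hardware-supplied
 * checksum has already failed, skb->csum holds the pseudo-header sum,
 * and a full software pass is made. If software now finds the packet
 * good, the hardware checksum unit was wrong and the fault is logged.
 */
#if 0
__sum16 sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));

if (likely(!sum)) {
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
		netdev_rx_csum_fault(skb->dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif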
1da177e4 LT |
1527 | /* Actually, we should eliminate this check as soon as we know that:
1528 |  * 1. An IOMMU is present and allows mapping of all the memory.
1529 |  * 2. No high memory really exists on this machine.
1530 | */ | |
1531 | ||
1532 | static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | |
1533 | { | |
3d3a8533 | 1534 | #ifdef CONFIG_HIGHMEM |
1da177e4 LT |
1535 | int i; |
1536 | ||
1537 | if (dev->features & NETIF_F_HIGHDMA) | |
1538 | return 0; | |
1539 | ||
1540 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | |
1541 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | |
1542 | return 1; | |
1543 | ||
3d3a8533 | 1544 | #endif |
1da177e4 LT |
1545 | return 0; |
1546 | } | |
1da177e4 | 1547 | |
f6a78bfc HX |
1548 | struct dev_gso_cb { |
1549 | void (*destructor)(struct sk_buff *skb); | |
1550 | }; | |
1551 | ||
1552 | #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) | |
1553 | ||
1554 | static void dev_gso_skb_destructor(struct sk_buff *skb) | |
1555 | { | |
1556 | struct dev_gso_cb *cb; | |
1557 | ||
1558 | do { | |
1559 | struct sk_buff *nskb = skb->next; | |
1560 | ||
1561 | skb->next = nskb->next; | |
1562 | nskb->next = NULL; | |
1563 | kfree_skb(nskb); | |
1564 | } while (skb->next); | |
1565 | ||
1566 | cb = DEV_GSO_CB(skb); | |
1567 | if (cb->destructor) | |
1568 | cb->destructor(skb); | |
1569 | } | |
1570 | ||
1571 | /** | |
1572 | * dev_gso_segment - Perform emulated hardware segmentation on skb. | |
1573 | * @skb: buffer to segment | |
1574 | * | |
1575 | * This function segments the given skb and stores the list of segments | |
1576 | * in skb->next. | |
1577 | */ | |
1578 | static int dev_gso_segment(struct sk_buff *skb) | |
1579 | { | |
1580 | struct net_device *dev = skb->dev; | |
1581 | struct sk_buff *segs; | |
576a30eb HX |
1582 | int features = dev->features & ~(illegal_highdma(dev, skb) ? |
1583 | NETIF_F_SG : 0); | |
1584 | ||
1585 | segs = skb_gso_segment(skb, features); | |
1586 | ||
1587 | /* Verifying header integrity only. */ | |
1588 | if (!segs) | |
1589 | return 0; | |
f6a78bfc | 1590 | |
801678c5 | 1591 | if (IS_ERR(segs)) |
f6a78bfc HX |
1592 | return PTR_ERR(segs); |
1593 | ||
1594 | skb->next = segs; | |
1595 | DEV_GSO_CB(skb)->destructor = skb->destructor; | |
1596 | skb->destructor = dev_gso_skb_destructor; | |
1597 | ||
1598 | return 0; | |
1599 | } | |
1600 | ||
1601 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1602 | { | |
1603 | if (likely(!skb->next)) { | |
9be9a6b9 | 1604 | if (!list_empty(&ptype_all)) |
f6a78bfc HX |
1605 | dev_queue_xmit_nit(skb, dev); |
1606 | ||
576a30eb HX |
1607 | if (netif_needs_gso(dev, skb)) { |
1608 | if (unlikely(dev_gso_segment(skb))) | |
1609 | goto out_kfree_skb; | |
1610 | if (skb->next) | |
1611 | goto gso; | |
1612 | } | |
f6a78bfc | 1613 | |
576a30eb | 1614 | return dev->hard_start_xmit(skb, dev); |
f6a78bfc HX |
1615 | } |
1616 | ||
576a30eb | 1617 | gso: |
f6a78bfc HX |
1618 | do { |
1619 | struct sk_buff *nskb = skb->next; | |
1620 | int rc; | |
1621 | ||
1622 | skb->next = nskb->next; | |
1623 | nskb->next = NULL; | |
1624 | rc = dev->hard_start_xmit(nskb, dev); | |
1625 | if (unlikely(rc)) { | |
f54d9e8d | 1626 | nskb->next = skb->next; |
f6a78bfc HX |
1627 | skb->next = nskb; |
1628 | return rc; | |
1629 | } | |
f25f4e44 | 1630 | if (unlikely((netif_queue_stopped(dev) || |
668f895a | 1631 | netif_subqueue_stopped(dev, skb)) && |
f25f4e44 | 1632 | skb->next)) |
f54d9e8d | 1633 | return NETDEV_TX_BUSY; |
f6a78bfc | 1634 | } while (skb->next); |
4ec93edb | 1635 | |
f6a78bfc HX |
1636 | skb->destructor = DEV_GSO_CB(skb)->destructor; |
1637 | ||
1638 | out_kfree_skb: | |
1639 | kfree_skb(skb); | |
1640 | return 0; | |
1641 | } | |
1642 | ||
1da177e4 LT |
1643 | /** |
1644 | * dev_queue_xmit - transmit a buffer | |
1645 | * @skb: buffer to transmit | |
1646 | * | |
1647 | * Queue a buffer for transmission to a network device. The caller must | |
1648 | * have set the device and priority and built the buffer before calling | |
1649 | * this function. The function can be called from an interrupt. | |
1650 | * | |
1651 | * A negative errno code is returned on a failure. A success does not | |
1652 | * guarantee the frame will be transmitted as it may be dropped due | |
1653 | * to congestion or traffic shaping. | |
af191367 BG |
1654 | * |
1655 | * ----------------------------------------------------------------------------------- | |
1656 | * I notice this method can also return errors from the queue disciplines, | |
1657 | * including NET_XMIT_DROP, which is a positive value. So, errors can also | |
1658 | * be positive. | |
1659 | * | |
1660 | * Regardless of the return value, the skb is consumed, so it is currently | |
1661 | * difficult to retry a send to this method. (You can bump the ref count | |
1662 | * before sending to hold a reference for retry if you are careful.) | |
1663 | * | |
1664 | * When calling this method, interrupts MUST be enabled. This is because | |
1665 | * the BH enable code must have IRQs enabled so that it will not deadlock. | |
1666 | * --BLG | |
1da177e4 LT |
1667 | */ |
1668 | ||
1669 | int dev_queue_xmit(struct sk_buff *skb) | |
1670 | { | |
1671 | struct net_device *dev = skb->dev; | |
dc2b4847 | 1672 | struct netdev_queue *txq; |
1da177e4 LT |
1673 | struct Qdisc *q; |
1674 | int rc = -ENOMEM; | |
1675 | ||
f6a78bfc HX |
1676 | /* GSO will handle the following emulations directly. */ |
1677 | if (netif_needs_gso(dev, skb)) | |
1678 | goto gso; | |
1679 | ||
1da177e4 LT |
1680 | if (skb_shinfo(skb)->frag_list && |
1681 | !(dev->features & NETIF_F_FRAGLIST) && | |
364c6bad | 1682 | __skb_linearize(skb)) |
1da177e4 LT |
1683 | goto out_kfree_skb; |
1684 | ||
1685 | /* Fragmented skb is linearized if device does not support SG, | |
1686 | * or if at least one of the fragments is in highmem and the device
1687 | * does not support DMA from it. | |
1688 | */ | |
1689 | if (skb_shinfo(skb)->nr_frags && | |
1690 | (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) && | |
364c6bad | 1691 | __skb_linearize(skb)) |
1da177e4 LT |
1692 | goto out_kfree_skb; |
1693 | ||
1694 | /* If packet is not checksummed and device does not support | |
1695 | * checksumming for this protocol, complete checksumming here. | |
1696 | */ | |
663ead3b HX |
1697 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1698 | skb_set_transport_header(skb, skb->csum_start - | |
1699 | skb_headroom(skb)); | |
6de329e2 BH |
1700 | if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb)) |
1701 | goto out_kfree_skb; | |
663ead3b | 1702 | } |
1da177e4 | 1703 | |
f6a78bfc | 1704 | gso: |
dc2b4847 DM |
1705 | txq = &dev->tx_queue; |
1706 | spin_lock_prefetch(&txq->lock); | |
2d7ceece | 1707 | |
4ec93edb YH |
1708 | /* Disable soft irqs for various locks below. Also |
1709 | * stops preemption for RCU. | |
1da177e4 | 1710 | */ |
4ec93edb | 1711 | rcu_read_lock_bh(); |
1da177e4 | 1712 | |
dc2b4847 | 1713 | /* Updates of qdisc are serialized by queue->lock. |
4ec93edb YH |
1714 |  * The struct Qdisc which is pointed to by qdisc is now an
1715 |  * RCU-protected structure - it may be accessed without acquiring
1da177e4 | 1716 | * a lock (but the structure may be stale.) The freeing of the |
4ec93edb | 1717 | * qdisc will be deferred until it's known that there are no |
1da177e4 | 1718 | * more references to it. |
4ec93edb YH |
1719 | * |
1720 | * If the qdisc has an enqueue function, we still need to | |
dc2b4847 | 1721 | * hold the queue->lock before calling it, since queue->lock |
1da177e4 LT |
1722 | * also serializes access to the device queue. |
1723 | */ | |
1724 | ||
b0e1e646 | 1725 | q = rcu_dereference(txq->qdisc); |
1da177e4 LT |
1726 | #ifdef CONFIG_NET_CLS_ACT |
1727 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); | |
1728 | #endif | |
1729 | if (q->enqueue) { | |
1730 | /* Grab device queue */ | |
dc2b4847 | 1731 | spin_lock(&txq->lock); |
b0e1e646 | 1732 | q = txq->qdisc; |
85670cc1 | 1733 | if (q->enqueue) { |
f25f4e44 | 1734 | /* reset queue_mapping to zero */ |
dfa40911 | 1735 | skb_set_queue_mapping(skb, 0); |
85670cc1 | 1736 | rc = q->enqueue(skb, q); |
eb6aafe3 | 1737 | qdisc_run(txq); |
dc2b4847 | 1738 | spin_unlock(&txq->lock); |
1da177e4 | 1739 | |
85670cc1 PM |
1740 | rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; |
1741 | goto out; | |
1742 | } | |
dc2b4847 | 1743 | spin_unlock(&txq->lock); |
1da177e4 LT |
1744 | } |
1745 | ||
1746 | /* The device has no queue. Common case for software devices: | |
1747 |    loopback, all sorts of tunnels...
1748 | ||
932ff279 HX |
1749 |    Really, it is unlikely that netif_tx_lock protection is necessary
1750 |    here. (E.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4 LT |
1751 |    counters.)
1752 |    However, it is possible that they rely on the protection
1753 |    we provide here.
1754 | ||
1755 |    Check this and shoot the lock. It is not prone to deadlocks.
1756 |    Or shoot the noqueue qdisc, which is even simpler 8)
1757 | */ | |
1758 | if (dev->flags & IFF_UP) { | |
1759 | int cpu = smp_processor_id(); /* ok because BHs are off */ | |
1760 | ||
c773e847 | 1761 | if (txq->xmit_lock_owner != cpu) { |
1da177e4 | 1762 | |
c773e847 | 1763 | HARD_TX_LOCK(dev, txq, cpu); |
1da177e4 | 1764 | |
f25f4e44 | 1765 | if (!netif_queue_stopped(dev) && |
668f895a | 1766 | !netif_subqueue_stopped(dev, skb)) { |
1da177e4 | 1767 | rc = 0; |
f6a78bfc | 1768 | if (!dev_hard_start_xmit(skb, dev)) { |
c773e847 | 1769 | HARD_TX_UNLOCK(dev, txq); |
1da177e4 LT |
1770 | goto out; |
1771 | } | |
1772 | } | |
c773e847 | 1773 | HARD_TX_UNLOCK(dev, txq); |
1da177e4 LT |
1774 | if (net_ratelimit()) |
1775 | printk(KERN_CRIT "Virtual device %s asks to " | |
1776 | "queue packet!\n", dev->name); | |
1777 | } else { | |
1778 | /* Recursion detected! It is possible,
1779 |  * unfortunately */
1780 | if (net_ratelimit()) | |
1781 | printk(KERN_CRIT "Dead loop on virtual device " | |
1782 | "%s, fix it urgently!\n", dev->name); | |
1783 | } | |
1784 | } | |
1785 | ||
1786 | rc = -ENETDOWN; | |
d4828d85 | 1787 | rcu_read_unlock_bh(); |
1da177e4 LT |
1788 | |
1789 | out_kfree_skb: | |
1790 | kfree_skb(skb); | |
1791 | return rc; | |
1792 | out: | |
d4828d85 | 1793 | rcu_read_unlock_bh(); |
1da177e4 LT |
1794 | return rc; |
1795 | } | |
1796 | ||
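/*
 * Sketch of the caller contract described above: set skb->dev (and a
 * priority) first, and never touch the skb after the call, since it is
 * consumed on both success and failure. TC_PRIO_CONTROL is just an
 * example priority; note a nonzero return may be a positive NET_XMIT
 * code rather than a negative errno.
 */
#if 0
skb->dev = dev;
skb->priority = TC_PRIO_CONTROL;	/* example priority */
if (dev_queue_xmit(skb))
	/* skb is already consumed; only bookkeeping may happen here */
	dev->stats.tx_dropped++;
#endif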
1797 | ||
1798 | /*======================================================================= | |
1799 | Receiver routines | |
1800 | =======================================================================*/ | |
1801 | ||
6b2bedc3 SH |
1802 | int netdev_max_backlog __read_mostly = 1000; |
1803 | int netdev_budget __read_mostly = 300; | |
1804 | int weight_p __read_mostly = 64; /* old backlog weight */ | |
1da177e4 LT |
1805 | |
1806 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |
1807 | ||
1808 | ||
1da177e4 LT |
1809 | /** |
1810 | * netif_rx - post buffer to the network code | |
1811 | * @skb: buffer to post | |
1812 | * | |
1813 | * This function receives a packet from a device driver and queues it for | |
1814 | * the upper (protocol) levels to process. It always succeeds. The buffer | |
1815 | * may be dropped during processing for congestion control or by the | |
1816 | * protocol layers. | |
1817 | * | |
1818 | * return values: | |
1819 | * NET_RX_SUCCESS (no congestion) | |
1da177e4 LT |
1820 | * NET_RX_DROP (packet was dropped) |
1821 | * | |
1822 | */ | |
1823 | ||
1824 | int netif_rx(struct sk_buff *skb) | |
1825 | { | |
1da177e4 LT |
1826 | struct softnet_data *queue; |
1827 | unsigned long flags; | |
1828 | ||
1829 | /* if netpoll wants it, pretend we never saw it */ | |
1830 | if (netpoll_rx(skb)) | |
1831 | return NET_RX_DROP; | |
1832 | ||
b7aa0bf7 | 1833 | if (!skb->tstamp.tv64) |
a61bbcf2 | 1834 | net_timestamp(skb); |
1da177e4 LT |
1835 | |
1836 | /* | |
1837 | * The code is rearranged so that the path is shortest
1838 | * when the CPU is congested but still operating.
1839 | */ | |
1840 | local_irq_save(flags); | |
1da177e4 LT |
1841 | queue = &__get_cpu_var(softnet_data); |
1842 | ||
1843 | __get_cpu_var(netdev_rx_stat).total++; | |
1844 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | |
1845 | if (queue->input_pkt_queue.qlen) { | |
1da177e4 LT |
1846 | enqueue: |
1847 | dev_hold(skb->dev); | |
1848 | __skb_queue_tail(&queue->input_pkt_queue, skb); | |
1da177e4 | 1849 | local_irq_restore(flags); |
34008d8c | 1850 | return NET_RX_SUCCESS; |
1da177e4 LT |
1851 | } |
1852 | ||
bea3348e | 1853 | napi_schedule(&queue->backlog); |
1da177e4 LT |
1854 | goto enqueue; |
1855 | } | |
1856 | ||
1da177e4 LT |
1857 | __get_cpu_var(netdev_rx_stat).dropped++; |
1858 | local_irq_restore(flags); | |
1859 | ||
1860 | kfree_skb(skb); | |
1861 | return NET_RX_DROP; | |
1862 | } | |
1863 | ||
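/*
 * Sketch: how a classic non-NAPI driver hands a received frame to the
 * stack from its interrupt handler. The DMA buffer rx_buf is
 * hypothetical; eth_type_trans() fills in skb->protocol before the
 * frame is queued.
 */
#if 0
skb = dev_alloc_skb(len + NET_IP_ALIGN);
if (skb == NULL) {
	dev->stats.rx_dropped++;
	return;
}
skb_reserve(skb, NET_IP_ALIGN);			/* align the IP header */
memcpy(skb_put(skb, len), rx_buf, len);		/* rx_buf is hypothetical */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
#endif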
1864 | int netif_rx_ni(struct sk_buff *skb) | |
1865 | { | |
1866 | int err; | |
1867 | ||
1868 | preempt_disable(); | |
1869 | err = netif_rx(skb); | |
1870 | if (local_softirq_pending()) | |
1871 | do_softirq(); | |
1872 | preempt_enable(); | |
1873 | ||
1874 | return err; | |
1875 | } | |
1876 | ||
1877 | EXPORT_SYMBOL(netif_rx_ni); | |
1878 | ||
f2ccd8fa | 1879 | static inline struct net_device *skb_bond(struct sk_buff *skb) |
1da177e4 LT |
1880 | { |
1881 | struct net_device *dev = skb->dev; | |
1882 | ||
8f903c70 | 1883 | if (dev->master) { |
7ea49ed7 | 1884 | if (skb_bond_should_drop(skb)) { |
8f903c70 JV |
1885 | kfree_skb(skb); |
1886 | return NULL; | |
1887 | } | |
1da177e4 | 1888 | skb->dev = dev->master; |
8f903c70 | 1889 | } |
f2ccd8fa DM |
1890 | |
1891 | return dev; | |
1da177e4 LT |
1892 | } |
1893 | ||
bea3348e | 1894 | |
1da177e4 LT |
1895 | static void net_tx_action(struct softirq_action *h) |
1896 | { | |
1897 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | |
1898 | ||
1899 | if (sd->completion_queue) { | |
1900 | struct sk_buff *clist; | |
1901 | ||
1902 | local_irq_disable(); | |
1903 | clist = sd->completion_queue; | |
1904 | sd->completion_queue = NULL; | |
1905 | local_irq_enable(); | |
1906 | ||
1907 | while (clist) { | |
1908 | struct sk_buff *skb = clist; | |
1909 | clist = clist->next; | |
1910 | ||
1911 | BUG_TRAP(!atomic_read(&skb->users)); | |
1912 | __kfree_skb(skb); | |
1913 | } | |
1914 | } | |
1915 | ||
1916 | if (sd->output_queue) { | |
ee609cb3 | 1917 | struct netdev_queue *head; |
1da177e4 LT |
1918 | |
1919 | local_irq_disable(); | |
1920 | head = sd->output_queue; | |
1921 | sd->output_queue = NULL; | |
1922 | local_irq_enable(); | |
1923 | ||
1924 | while (head) { | |
ee609cb3 DM |
1925 | struct netdev_queue *txq = head; |
1926 | struct net_device *dev = txq->dev; | |
1da177e4 LT |
1927 | head = head->next_sched; |
1928 | ||
1929 | smp_mb__before_clear_bit(); | |
1930 | clear_bit(__LINK_STATE_SCHED, &dev->state); | |
1931 | ||
dc2b4847 | 1932 | if (spin_trylock(&txq->lock)) { |
eb6aafe3 | 1933 | qdisc_run(txq); |
dc2b4847 | 1934 | spin_unlock(&txq->lock); |
1da177e4 | 1935 | } else { |
86d804e1 | 1936 | netif_schedule_queue(txq); |
1da177e4 LT |
1937 | } |
1938 | } | |
1939 | } | |
1940 | } | |
1941 | ||
6f05f629 SH |
1942 | static inline int deliver_skb(struct sk_buff *skb, |
1943 | struct packet_type *pt_prev, | |
1944 | struct net_device *orig_dev) | |
1da177e4 LT |
1945 | { |
1946 | atomic_inc(&skb->users); | |
f2ccd8fa | 1947 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
1da177e4 LT |
1948 | } |
1949 | ||
1950 | #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) | |
6229e362 | 1951 | /* These hooks defined here for ATM */ |
1da177e4 LT |
1952 | struct net_bridge; |
1953 | struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br, | |
1954 | unsigned char *addr); | |
6229e362 | 1955 | void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly; |
1da177e4 | 1956 | |
6229e362 SH |
1957 | /* |
1958 | * If bridge module is loaded call bridging hook. | |
1959 | * returns NULL if packet was consumed. | |
1960 | */ | |
1961 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | |
1962 | struct sk_buff *skb) __read_mostly; | |
1963 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | |
1964 | struct packet_type **pt_prev, int *ret, | |
1965 | struct net_device *orig_dev) | |
1da177e4 LT |
1966 | { |
1967 | struct net_bridge_port *port; | |
1968 | ||
6229e362 SH |
1969 | if (skb->pkt_type == PACKET_LOOPBACK || |
1970 | (port = rcu_dereference(skb->dev->br_port)) == NULL) | |
1971 | return skb; | |
1da177e4 LT |
1972 | |
1973 | if (*pt_prev) { | |
6229e362 | 1974 | *ret = deliver_skb(skb, *pt_prev, orig_dev); |
1da177e4 | 1975 | *pt_prev = NULL; |
4ec93edb YH |
1976 | } |
1977 | ||
6229e362 | 1978 | return br_handle_frame_hook(port, skb); |
1da177e4 LT |
1979 | } |
1980 | #else | |
6229e362 | 1981 | #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) |
1da177e4 LT |
1982 | #endif |
1983 | ||
b863ceb7 PM |
1984 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) |
1985 | struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly; | |
1986 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); | |
1987 | ||
1988 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | |
1989 | struct packet_type **pt_prev, | |
1990 | int *ret, | |
1991 | struct net_device *orig_dev) | |
1992 | { | |
1993 | if (skb->dev->macvlan_port == NULL) | |
1994 | return skb; | |
1995 | ||
1996 | if (*pt_prev) { | |
1997 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | |
1998 | *pt_prev = NULL; | |
1999 | } | |
2000 | return macvlan_handle_frame_hook(skb); | |
2001 | } | |
2002 | #else | |
2003 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) | |
2004 | #endif | |
2005 | ||
1da177e4 LT |
2006 | #ifdef CONFIG_NET_CLS_ACT |
2007 | /* TODO: Maybe we should just force sch_ingress to be compiled in | |
2008 |  * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
2009 |  * instructions (a compare and two extra stores) when ingress is
2010 |  * not configured but CONFIG_NET_CLS_ACT is.
4ec93edb | 2011 |  * NOTE: This doesn't remove any functionality; if you don't have
1da177e4 LT |
2012 |  * the ingress scheduler, you just can't add policies on ingress.
2013 | * | |
2014 | */ | |
4ec93edb | 2015 | static int ing_filter(struct sk_buff *skb) |
1da177e4 | 2016 | { |
1da177e4 | 2017 | struct net_device *dev = skb->dev; |
f697c3e8 | 2018 | u32 ttl = G_TC_RTTL(skb->tc_verd); |
555353cf DM |
2019 | struct netdev_queue *rxq; |
2020 | int result = TC_ACT_OK; | |
2021 | struct Qdisc *q; | |
4ec93edb | 2022 | |
f697c3e8 HX |
2023 | if (MAX_RED_LOOP < ttl++) { |
2024 | printk(KERN_WARNING | |
2025 | "Redir loop detected Dropping packet (%d->%d)\n", | |
2026 | skb->iif, dev->ifindex); | |
2027 | return TC_ACT_SHOT; | |
2028 | } | |
1da177e4 | 2029 | |
f697c3e8 HX |
2030 | skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); |
2031 | skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); | |
1da177e4 | 2032 | |
555353cf DM |
2033 | rxq = &dev->rx_queue; |
2034 | ||
2035 | spin_lock(&rxq->lock); | |
816f3258 | 2036 | if ((q = rxq->qdisc) != NULL) |
f697c3e8 | 2037 | result = q->enqueue(skb, q); |
555353cf | 2038 | spin_unlock(&rxq->lock); |
f697c3e8 HX |
2039 | |
2040 | return result; | |
2041 | } | |
86e65da9 | 2042 | |
f697c3e8 HX |
2043 | static inline struct sk_buff *handle_ing(struct sk_buff *skb, |
2044 | struct packet_type **pt_prev, | |
2045 | int *ret, struct net_device *orig_dev) | |
2046 | { | |
816f3258 | 2047 | if (!skb->dev->rx_queue.qdisc) |
f697c3e8 | 2048 | goto out; |
1da177e4 | 2049 | |
f697c3e8 HX |
2050 | if (*pt_prev) { |
2051 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | |
2052 | *pt_prev = NULL; | |
2053 | } else { | |
2054 | /* Huh? Why does turning on AF_PACKET affect this? */ | |
2055 | skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); | |
1da177e4 LT |
2056 | } |
2057 | ||
f697c3e8 HX |
2058 | switch (ing_filter(skb)) { |
2059 | case TC_ACT_SHOT: | |
2060 | case TC_ACT_STOLEN: | |
2061 | kfree_skb(skb); | |
2062 | return NULL; | |
2063 | } | |
2064 | ||
2065 | out: | |
2066 | skb->tc_verd = 0; | |
2067 | return skb; | |
1da177e4 LT |
2068 | } |
2069 | #endif | |
2070 | ||
bc1d0411 PM |
2071 | /* |
2072 | * netif_nit_deliver - deliver received packets to network taps | |
2073 | * @skb: buffer | |
2074 | * | |
2075 | * This function is used to deliver incoming packets to network | |
2076 | * taps. It should be used when the normal netif_receive_skb path | |
2077 | * is bypassed, for example because of VLAN acceleration. | |
2078 | */ | |
2079 | void netif_nit_deliver(struct sk_buff *skb) | |
2080 | { | |
2081 | struct packet_type *ptype; | |
2082 | ||
2083 | if (list_empty(&ptype_all)) | |
2084 | return; | |
2085 | ||
2086 | skb_reset_network_header(skb); | |
2087 | skb_reset_transport_header(skb); | |
2088 | skb->mac_len = skb->network_header - skb->mac_header; | |
2089 | ||
2090 | rcu_read_lock(); | |
2091 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | |
2092 | if (!ptype->dev || ptype->dev == skb->dev) | |
2093 | deliver_skb(skb, ptype, skb->dev); | |
2094 | } | |
2095 | rcu_read_unlock(); | |
2096 | } | |
2097 | ||
3b582cc1 SH |
2098 | /** |
2099 | * netif_receive_skb - process receive buffer from network | |
2100 | * @skb: buffer to process | |
2101 | * | |
2102 | * netif_receive_skb() is the main receive data processing function. | |
2103 | * It always succeeds. The buffer may be dropped during processing | |
2104 | * for congestion control or by the protocol layers. | |
2105 | * | |
2106 | * This function may only be called from softirq context and interrupts | |
2107 | * should be enabled. | |
2108 | * | |
2109 | * Return values (usually ignored): | |
2110 | * NET_RX_SUCCESS: no congestion | |
2111 | * NET_RX_DROP: packet was dropped | |
2112 | */ | |
1da177e4 LT |
2113 | int netif_receive_skb(struct sk_buff *skb) |
2114 | { | |
2115 | struct packet_type *ptype, *pt_prev; | |
f2ccd8fa | 2116 | struct net_device *orig_dev; |
1da177e4 | 2117 | int ret = NET_RX_DROP; |
252e3346 | 2118 | __be16 type; |
1da177e4 LT |
2119 | |
2120 | /* if we've gotten here through NAPI, check netpoll */ | |
bea3348e | 2121 | if (netpoll_receive_skb(skb)) |
1da177e4 LT |
2122 | return NET_RX_DROP; |
2123 | ||
b7aa0bf7 | 2124 | if (!skb->tstamp.tv64) |
a61bbcf2 | 2125 | net_timestamp(skb); |
1da177e4 | 2126 | |
c01003c2 PM |
2127 | if (!skb->iif) |
2128 | skb->iif = skb->dev->ifindex; | |
86e65da9 | 2129 | |
f2ccd8fa | 2130 | orig_dev = skb_bond(skb); |
1da177e4 | 2131 | |
8f903c70 JV |
2132 | if (!orig_dev) |
2133 | return NET_RX_DROP; | |
2134 | ||
1da177e4 LT |
2135 | __get_cpu_var(netdev_rx_stat).total++; |
2136 | ||
c1d2bbe1 | 2137 | skb_reset_network_header(skb); |
badff6d0 | 2138 | skb_reset_transport_header(skb); |
b0e380b1 | 2139 | skb->mac_len = skb->network_header - skb->mac_header; |
1da177e4 LT |
2140 | |
2141 | pt_prev = NULL; | |
2142 | ||
2143 | rcu_read_lock(); | |
2144 | ||
b9f75f45 EB |
2145 | /* Don't receive packets in an exiting network namespace */ |
2146 | if (!net_alive(dev_net(skb->dev))) | |
2147 | goto out; | |
2148 | ||
1da177e4 LT |
2149 | #ifdef CONFIG_NET_CLS_ACT |
2150 | if (skb->tc_verd & TC_NCLS) { | |
2151 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); | |
2152 | goto ncls; | |
2153 | } | |
2154 | #endif | |
2155 | ||
2156 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | |
2157 | if (!ptype->dev || ptype->dev == skb->dev) { | |
4ec93edb | 2158 | if (pt_prev) |
f2ccd8fa | 2159 | ret = deliver_skb(skb, pt_prev, orig_dev); |
1da177e4 LT |
2160 | pt_prev = ptype; |
2161 | } | |
2162 | } | |
2163 | ||
2164 | #ifdef CONFIG_NET_CLS_ACT | |
f697c3e8 HX |
2165 | skb = handle_ing(skb, &pt_prev, &ret, orig_dev); |
2166 | if (!skb) | |
1da177e4 | 2167 | goto out; |
1da177e4 LT |
2168 | ncls: |
2169 | #endif | |
2170 | ||
6229e362 | 2171 | skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); |
b863ceb7 PM |
2172 | if (!skb) |
2173 | goto out; | |
2174 | skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); | |
6229e362 | 2175 | if (!skb) |
1da177e4 LT |
2176 | goto out; |
2177 | ||
2178 | type = skb->protocol; | |
82d8a867 PE |
2179 | list_for_each_entry_rcu(ptype, |
2180 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | |
1da177e4 LT |
2181 | if (ptype->type == type && |
2182 | (!ptype->dev || ptype->dev == skb->dev)) { | |
4ec93edb | 2183 | if (pt_prev) |
f2ccd8fa | 2184 | ret = deliver_skb(skb, pt_prev, orig_dev); |
1da177e4 LT |
2185 | pt_prev = ptype; |
2186 | } | |
2187 | } | |
2188 | ||
2189 | if (pt_prev) { | |
f2ccd8fa | 2190 | ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
1da177e4 LT |
2191 | } else { |
2192 | kfree_skb(skb); | |
2193 | /* Jamal, now you will not be able to escape explaining
2194 |  * to me how you were going to use this. :-)
2195 | */ | |
2196 | ret = NET_RX_DROP; | |
2197 | } | |
2198 | ||
2199 | out: | |
2200 | rcu_read_unlock(); | |
2201 | return ret; | |
2202 | } | |
2203 | ||
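/*
 * Sketch: the usual caller is a NAPI driver's ->poll() routine, which
 * runs in softirq context as required above. struct my_priv,
 * my_next_rx_skb() and my_enable_rx_irq() are hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = my_next_rx_skb(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget) {
		netif_rx_complete(priv->dev, napi);
		my_enable_rx_irq(priv);		/* re-arm the interrupt */
	}
	return work;
}
#endif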
bea3348e | 2204 | static int process_backlog(struct napi_struct *napi, int quota) |
1da177e4 LT |
2205 | { |
2206 | int work = 0; | |
1da177e4 LT |
2207 | struct softnet_data *queue = &__get_cpu_var(softnet_data); |
2208 | unsigned long start_time = jiffies; | |
2209 | ||
bea3348e SH |
2210 | napi->weight = weight_p; |
2211 | do { | |
1da177e4 LT |
2212 | struct sk_buff *skb; |
2213 | struct net_device *dev; | |
2214 | ||
2215 | local_irq_disable(); | |
2216 | skb = __skb_dequeue(&queue->input_pkt_queue); | |
bea3348e SH |
2217 | if (!skb) { |
2218 | __napi_complete(napi); | |
2219 | local_irq_enable(); | |
2220 | break; | |
2221 | } | |
2222 | ||
1da177e4 LT |
2223 | local_irq_enable(); |
2224 | ||
2225 | dev = skb->dev; | |
2226 | ||
2227 | netif_receive_skb(skb); | |
2228 | ||
2229 | dev_put(dev); | |
bea3348e | 2230 | } while (++work < quota && jiffies == start_time); |
1da177e4 | 2231 | |
bea3348e SH |
2232 | return work; |
2233 | } | |
1da177e4 | 2234 | |
bea3348e SH |
2235 | /** |
2236 | * __napi_schedule - schedule for receive | |
c4ea43c5 | 2237 | * @n: entry to schedule |
bea3348e SH |
2238 | * |
2239 | * The entry's receive function will be scheduled to run | |
2240 | */ | |
b5606c2d | 2241 | void __napi_schedule(struct napi_struct *n) |
bea3348e SH |
2242 | { |
2243 | unsigned long flags; | |
1da177e4 | 2244 | |
bea3348e SH |
2245 | local_irq_save(flags); |
2246 | list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); | |
2247 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
2248 | local_irq_restore(flags); | |
1da177e4 | 2249 | } |
bea3348e SH |
2250 | EXPORT_SYMBOL(__napi_schedule); |
2251 | ||
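/*
 * Sketch: drivers normally reach __napi_schedule() through the
 * netif_rx_schedule*() wrappers, which test NAPI_STATE_SCHED first.
 * The my_* names are hypothetical.
 */
#if 0
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (netif_rx_schedule_prep(priv->dev, &priv->napi)) {
		my_disable_rx_irq(priv);	/* quiesce the source */
		__netif_rx_schedule(priv->dev, &priv->napi);
	}
	return IRQ_HANDLED;
}
#endif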
1da177e4 LT |
2252 | |
2253 | static void net_rx_action(struct softirq_action *h) | |
2254 | { | |
bea3348e | 2255 | struct list_head *list = &__get_cpu_var(softnet_data).poll_list; |
1da177e4 | 2256 | unsigned long start_time = jiffies; |
51b0bded | 2257 | int budget = netdev_budget; |
53fb95d3 MM |
2258 | void *have; |
2259 | ||
1da177e4 LT |
2260 | local_irq_disable(); |
2261 | ||
bea3348e SH |
2262 | while (!list_empty(list)) { |
2263 | struct napi_struct *n; | |
2264 | int work, weight; | |
1da177e4 | 2265 | |
bea3348e SH |
2266 | /* If the softirq window is exhausted then punt.
2267 | * | |
2268 | * Note that this is a slight policy change from the | |
2269 | * previous NAPI code, which would allow up to 2 | |
2270 | * jiffies to pass before breaking out. The test | |
2271 | * used to be "jiffies - start_time > 1". | |
2272 | */ | |
2273 | if (unlikely(budget <= 0 || jiffies != start_time)) | |
1da177e4 LT |
2274 | goto softnet_break; |
2275 | ||
2276 | local_irq_enable(); | |
2277 | ||
bea3348e SH |
2278 | /* Even though interrupts have been re-enabled, this |
2279 | * access is safe because interrupts can only add new | |
2280 | * entries to the tail of this list, and only ->poll() | |
2281 | * calls can remove this head entry from the list. | |
2282 | */ | |
2283 | n = list_entry(list->next, struct napi_struct, poll_list); | |
1da177e4 | 2284 | |
bea3348e SH |
2285 | have = netpoll_poll_lock(n); |
2286 | ||
2287 | weight = n->weight; | |
2288 | ||
0a7606c1 DM |
2289 | /* This NAPI_STATE_SCHED test is for avoiding a race |
2290 | * with netpoll's poll_napi(). Only the entity which | |
2291 | * obtains the lock and sees NAPI_STATE_SCHED set will | |
2292 | * actually make the ->poll() call. Therefore we avoid | |
2293 | * accidently calling ->poll() when NAPI is not scheduled. | |
2294 | */ | |
2295 | work = 0; | |
2296 | if (test_bit(NAPI_STATE_SCHED, &n->state)) | |
2297 | work = n->poll(n, weight); | |
bea3348e SH |
2298 | |
2299 | WARN_ON_ONCE(work > weight); | |
2300 | ||
2301 | budget -= work; | |
2302 | ||
2303 | local_irq_disable(); | |
2304 | ||
2305 | /* Drivers must not modify the NAPI state if they | |
2306 | * consume the entire weight. In such cases this code | |
2307 | * still "owns" the NAPI instance and therefore can | |
2308 | * move the instance around on the list at-will. | |
2309 | */ | |
fed17f30 DM |
2310 | if (unlikely(work == weight)) { |
2311 | if (unlikely(napi_disable_pending(n))) | |
2312 | __napi_complete(n); | |
2313 | else | |
2314 | list_move_tail(&n->poll_list, list); | |
2315 | } | |
bea3348e SH |
2316 | |
2317 | netpoll_poll_unlock(have); | |
1da177e4 LT |
2318 | } |
2319 | out: | |
515e06c4 | 2320 | local_irq_enable(); |
bea3348e | 2321 | |
db217334 CL |
2322 | #ifdef CONFIG_NET_DMA |
2323 | /* | |
2324 | * There may not be any more sk_buffs coming right now, so push | |
2325 | * any pending DMA copies to hardware | |
2326 | */ | |
d379b01e DW |
2327 | if (!cpus_empty(net_dma.channel_mask)) { |
2328 | int chan_idx; | |
2329 | for_each_cpu_mask(chan_idx, net_dma.channel_mask) { | |
2330 | struct dma_chan *chan = net_dma.channels[chan_idx]; | |
2331 | if (chan) | |
2332 | dma_async_memcpy_issue_pending(chan); | |
2333 | } | |
db217334 CL |
2334 | } |
2335 | #endif | |
bea3348e | 2336 | |
1da177e4 LT |
2337 | return; |
2338 | ||
2339 | softnet_break: | |
2340 | __get_cpu_var(netdev_rx_stat).time_squeeze++; | |
2341 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | |
2342 | goto out; | |
2343 | } | |
2344 | ||
2345 | static gifconf_func_t * gifconf_list [NPROTO]; | |
2346 | ||
2347 | /** | |
2348 | * register_gifconf - register a SIOCGIF handler | |
2349 | * @family: Address family | |
2350 | * @gifconf: Function handler | |
2351 | * | |
2352 | * Register protocol dependent address dumping routines. The handler | |
2353 | * that is passed must not be freed or reused until it has been replaced | |
2354 | * by another handler. | |
2355 | */ | |
2356 | int register_gifconf(unsigned int family, gifconf_func_t * gifconf) | |
2357 | { | |
2358 | if (family >= NPROTO) | |
2359 | return -EINVAL; | |
2360 | gifconf_list[family] = gifconf; | |
2361 | return 0; | |
2362 | } | |
2363 | ||
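/*
 * Sketch: a protocol registers its SIOCGIFCONF dumper once at init
 * time. inet_gifconf() in net/ipv4/devinet.c is the in-tree user;
 * my_gifconf() and my_proto_init() below are hypothetical, and the
 * family is only an example.
 */
#if 0
static int my_gifconf(struct net_device *dev, char __user *buf, int len);

static int __init my_proto_init(void)
{
	return register_gifconf(PF_INET, my_gifconf);
}
#endif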
2364 | ||
2365 | /* | |
2366 | * Map an interface index to its name (SIOCGIFNAME) | |
2367 | */ | |
2368 | ||
2369 | /* | |
2370 | * We need this ioctl for efficient implementation of the | |
2371 | * if_indextoname() function required by the IPv6 API. Without | |
2372 | * it, we would have to search all the interfaces to find a | |
2373 | * match. --pb | |
2374 | */ | |
2375 | ||
881d966b | 2376 | static int dev_ifname(struct net *net, struct ifreq __user *arg) |
1da177e4 LT |
2377 | { |
2378 | struct net_device *dev; | |
2379 | struct ifreq ifr; | |
2380 | ||
2381 | /* | |
2382 | * Fetch the caller's info block. | |
2383 | */ | |
2384 | ||
2385 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | |
2386 | return -EFAULT; | |
2387 | ||
2388 | read_lock(&dev_base_lock); | |
881d966b | 2389 | dev = __dev_get_by_index(net, ifr.ifr_ifindex); |
1da177e4 LT |
2390 | if (!dev) { |
2391 | read_unlock(&dev_base_lock); | |
2392 | return -ENODEV; | |
2393 | } | |
2394 | ||
2395 | strcpy(ifr.ifr_name, dev->name); | |
2396 | read_unlock(&dev_base_lock); | |
2397 | ||
2398 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) | |
2399 | return -EFAULT; | |
2400 | return 0; | |
2401 | } | |
2402 | ||
2403 | /* | |
2404 | * Perform a SIOCGIFCONF call. This structure will change | |
2405 | * size eventually, and there is nothing I can do about it. | |
2406 | * Thus we will need a 'compatibility mode'. | |
2407 | */ | |
2408 | ||
881d966b | 2409 | static int dev_ifconf(struct net *net, char __user *arg) |
1da177e4 LT |
2410 | { |
2411 | struct ifconf ifc; | |
2412 | struct net_device *dev; | |
2413 | char __user *pos; | |
2414 | int len; | |
2415 | int total; | |
2416 | int i; | |
2417 | ||
2418 | /* | |
2419 | * Fetch the caller's info block. | |
2420 | */ | |
2421 | ||
2422 | if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) | |
2423 | return -EFAULT; | |
2424 | ||
2425 | pos = ifc.ifc_buf; | |
2426 | len = ifc.ifc_len; | |
2427 | ||
2428 | /* | |
2429 | * Loop over the interfaces, and write an info block for each. | |
2430 | */ | |
2431 | ||
2432 | total = 0; | |
881d966b | 2433 | for_each_netdev(net, dev) { |
1da177e4 LT |
2434 | for (i = 0; i < NPROTO; i++) { |
2435 | if (gifconf_list[i]) { | |
2436 | int done; | |
2437 | if (!pos) | |
2438 | done = gifconf_list[i](dev, NULL, 0); | |
2439 | else | |
2440 | done = gifconf_list[i](dev, pos + total, | |
2441 | len - total); | |
2442 | if (done < 0) | |
2443 | return -EFAULT; | |
2444 | total += done; | |
2445 | } | |
2446 | } | |
4ec93edb | 2447 | } |
1da177e4 LT |
2448 | |
2449 | /* | |
2450 | * All done. Write the updated control block back to the caller. | |
2451 | */ | |
2452 | ifc.ifc_len = total; | |
2453 | ||
2454 | /* | |
2455 | * Both BSD and Solaris return 0 here, so we do too. | |
2456 | */ | |
2457 | return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; | |
2458 | } | |
2459 | ||
2460 | #ifdef CONFIG_PROC_FS | |
2461 | /* | |
2462 | * This is invoked by the /proc filesystem handler to display a device | |
2463 | * in detail. | |
2464 | */ | |
7562f876 | 2465 | void *dev_seq_start(struct seq_file *seq, loff_t *pos) |
9a429c49 | 2466 | __acquires(dev_base_lock) |
1da177e4 | 2467 | { |
e372c414 | 2468 | struct net *net = seq_file_net(seq); |
7562f876 | 2469 | loff_t off; |
1da177e4 | 2470 | struct net_device *dev; |
1da177e4 | 2471 | |
7562f876 PE |
2472 | read_lock(&dev_base_lock); |
2473 | if (!*pos) | |
2474 | return SEQ_START_TOKEN; | |
1da177e4 | 2475 | |
7562f876 | 2476 | off = 1; |
881d966b | 2477 | for_each_netdev(net, dev) |
7562f876 PE |
2478 | if (off++ == *pos) |
2479 | return dev; | |
1da177e4 | 2480 | |
7562f876 | 2481 | return NULL; |
1da177e4 LT |
2482 | } |
2483 | ||
2484 | void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2485 | { | |
e372c414 | 2486 | struct net *net = seq_file_net(seq); |
1da177e4 | 2487 | ++*pos; |
7562f876 | 2488 | return v == SEQ_START_TOKEN ? |
881d966b | 2489 | first_net_device(net) : next_net_device((struct net_device *)v); |
1da177e4 LT |
2490 | } |
2491 | ||
2492 | void dev_seq_stop(struct seq_file *seq, void *v) | |
9a429c49 | 2493 | __releases(dev_base_lock) |
1da177e4 LT |
2494 | { |
2495 | read_unlock(&dev_base_lock); | |
2496 | } | |
2497 | ||
2498 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | |
2499 | { | |
c45d286e | 2500 | struct net_device_stats *stats = dev->get_stats(dev); |
1da177e4 | 2501 | |
5a1b5898 RR |
2502 | seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " |
2503 | "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", | |
2504 | dev->name, stats->rx_bytes, stats->rx_packets, | |
2505 | stats->rx_errors, | |
2506 | stats->rx_dropped + stats->rx_missed_errors, | |
2507 | stats->rx_fifo_errors, | |
2508 | stats->rx_length_errors + stats->rx_over_errors + | |
2509 | stats->rx_crc_errors + stats->rx_frame_errors, | |
2510 | stats->rx_compressed, stats->multicast, | |
2511 | stats->tx_bytes, stats->tx_packets, | |
2512 | stats->tx_errors, stats->tx_dropped, | |
2513 | stats->tx_fifo_errors, stats->collisions, | |
2514 | stats->tx_carrier_errors + | |
2515 | stats->tx_aborted_errors + | |
2516 | stats->tx_window_errors + | |
2517 | stats->tx_heartbeat_errors, | |
2518 | stats->tx_compressed); | |
1da177e4 LT |
2519 | } |
2520 | ||
2521 | /* | |
2522 | * Called from the PROCfs module. This now uses the new arbitrary sized | |
2523 | * /proc/net interface to create /proc/net/dev | |
2524 | */ | |
2525 | static int dev_seq_show(struct seq_file *seq, void *v) | |
2526 | { | |
2527 | if (v == SEQ_START_TOKEN) | |
2528 | seq_puts(seq, "Inter-| Receive " | |
2529 | " | Transmit\n" | |
2530 | " face |bytes packets errs drop fifo frame " | |
2531 | "compressed multicast|bytes packets errs " | |
2532 | "drop fifo colls carrier compressed\n"); | |
2533 | else | |
2534 | dev_seq_printf_stats(seq, v); | |
2535 | return 0; | |
2536 | } | |
2537 | ||
2538 | static struct netif_rx_stats *softnet_get_online(loff_t *pos) | |
2539 | { | |
2540 | struct netif_rx_stats *rc = NULL; | |
2541 | ||
0c0b0aca | 2542 | while (*pos < nr_cpu_ids) |
4ec93edb | 2543 | if (cpu_online(*pos)) { |
1da177e4 LT |
2544 | rc = &per_cpu(netdev_rx_stat, *pos); |
2545 | break; | |
2546 | } else | |
2547 | ++*pos; | |
2548 | return rc; | |
2549 | } | |
2550 | ||
2551 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) | |
2552 | { | |
2553 | return softnet_get_online(pos); | |
2554 | } | |
2555 | ||
2556 | static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2557 | { | |
2558 | ++*pos; | |
2559 | return softnet_get_online(pos); | |
2560 | } | |
2561 | ||
2562 | static void softnet_seq_stop(struct seq_file *seq, void *v) | |
2563 | { | |
2564 | } | |
2565 | ||
2566 | static int softnet_seq_show(struct seq_file *seq, void *v) | |
2567 | { | |
2568 | struct netif_rx_stats *s = v; | |
2569 | ||
2570 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | |
31aa02c5 | 2571 | s->total, s->dropped, s->time_squeeze, 0, |
c1ebcdb8 SH |
2572 | 0, 0, 0, 0, /* was fastroute */ |
2573 | s->cpu_collision ); | |
1da177e4 LT |
2574 | return 0; |
2575 | } | |
2576 | ||
f690808e | 2577 | static const struct seq_operations dev_seq_ops = { |
1da177e4 LT |
2578 | .start = dev_seq_start, |
2579 | .next = dev_seq_next, | |
2580 | .stop = dev_seq_stop, | |
2581 | .show = dev_seq_show, | |
2582 | }; | |
2583 | ||
2584 | static int dev_seq_open(struct inode *inode, struct file *file) | |
2585 | { | |
e372c414 DL |
2586 | return seq_open_net(inode, file, &dev_seq_ops, |
2587 | sizeof(struct seq_net_private)); | |
1da177e4 LT |
2588 | } |
2589 | ||
9a32144e | 2590 | static const struct file_operations dev_seq_fops = { |
1da177e4 LT |
2591 | .owner = THIS_MODULE, |
2592 | .open = dev_seq_open, | |
2593 | .read = seq_read, | |
2594 | .llseek = seq_lseek, | |
e372c414 | 2595 | .release = seq_release_net, |
1da177e4 LT |
2596 | }; |
2597 | ||
f690808e | 2598 | static const struct seq_operations softnet_seq_ops = { |
1da177e4 LT |
2599 | .start = softnet_seq_start, |
2600 | .next = softnet_seq_next, | |
2601 | .stop = softnet_seq_stop, | |
2602 | .show = softnet_seq_show, | |
2603 | }; | |
2604 | ||
2605 | static int softnet_seq_open(struct inode *inode, struct file *file) | |
2606 | { | |
2607 | return seq_open(file, &softnet_seq_ops); | |
2608 | } | |
2609 | ||
9a32144e | 2610 | static const struct file_operations softnet_seq_fops = { |
1da177e4 LT |
2611 | .owner = THIS_MODULE, |
2612 | .open = softnet_seq_open, | |
2613 | .read = seq_read, | |
2614 | .llseek = seq_lseek, | |
2615 | .release = seq_release, | |
2616 | }; | |
2617 | ||
0e1256ff SH |
2618 | static void *ptype_get_idx(loff_t pos) |
2619 | { | |
2620 | struct packet_type *pt = NULL; | |
2621 | loff_t i = 0; | |
2622 | int t; | |
2623 | ||
2624 | list_for_each_entry_rcu(pt, &ptype_all, list) { | |
2625 | if (i == pos) | |
2626 | return pt; | |
2627 | ++i; | |
2628 | } | |
2629 | ||
82d8a867 | 2630 | for (t = 0; t < PTYPE_HASH_SIZE; t++) { |
0e1256ff SH |
2631 | list_for_each_entry_rcu(pt, &ptype_base[t], list) { |
2632 | if (i == pos) | |
2633 | return pt; | |
2634 | ++i; | |
2635 | } | |
2636 | } | |
2637 | return NULL; | |
2638 | } | |
2639 | ||
2640 | static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) | |
72348a42 | 2641 | __acquires(RCU) |
0e1256ff SH |
2642 | { |
2643 | rcu_read_lock(); | |
2644 | return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; | |
2645 | } | |
2646 | ||
2647 | static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
2648 | { | |
2649 | struct packet_type *pt; | |
2650 | struct list_head *nxt; | |
2651 | int hash; | |
2652 | ||
2653 | ++*pos; | |
2654 | if (v == SEQ_START_TOKEN) | |
2655 | return ptype_get_idx(0); | |
2656 | ||
2657 | pt = v; | |
2658 | nxt = pt->list.next; | |
2659 | if (pt->type == htons(ETH_P_ALL)) { | |
2660 | if (nxt != &ptype_all) | |
2661 | goto found; | |
2662 | hash = 0; | |
2663 | nxt = ptype_base[0].next; | |
2664 | } else | |
82d8a867 | 2665 | hash = ntohs(pt->type) & PTYPE_HASH_MASK; |
0e1256ff SH |
2666 | |
2667 | while (nxt == &ptype_base[hash]) { | |
82d8a867 | 2668 | if (++hash >= PTYPE_HASH_SIZE) |
0e1256ff SH |
2669 | return NULL; |
2670 | nxt = ptype_base[hash].next; | |
2671 | } | |
2672 | found: | |
2673 | return list_entry(nxt, struct packet_type, list); | |
2674 | } | |
2675 | ||
2676 | static void ptype_seq_stop(struct seq_file *seq, void *v) | |
72348a42 | 2677 | __releases(RCU) |
0e1256ff SH |
2678 | { |
2679 | rcu_read_unlock(); | |
2680 | } | |
2681 | ||
2682 | static void ptype_seq_decode(struct seq_file *seq, void *sym) | |
2683 | { | |
2684 | #ifdef CONFIG_KALLSYMS | |
2685 | unsigned long offset = 0, symsize; | |
2686 | const char *symname; | |
2687 | char *modname; | |
2688 | char namebuf[128]; | |
2689 | ||
2690 | symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset, | |
2691 | &modname, namebuf); | |
2692 | ||
2693 | if (symname) { | |
2694 | char *delim = ":"; | |
2695 | ||
2696 | if (!modname) | |
2697 | modname = delim = ""; | |
2698 | seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim, | |
2699 | symname, offset); | |
2700 | return; | |
2701 | } | |
2702 | #endif | |
2703 | ||
2704 | seq_printf(seq, "[%p]", sym); | |
2705 | } | |
2706 | ||
2707 | static int ptype_seq_show(struct seq_file *seq, void *v) | |
2708 | { | |
2709 | struct packet_type *pt = v; | |
2710 | ||
2711 | if (v == SEQ_START_TOKEN) | |
2712 | seq_puts(seq, "Type Device Function\n"); | |
c346dca1 | 2713 | else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { |
0e1256ff SH |
2714 | if (pt->type == htons(ETH_P_ALL)) |
2715 | seq_puts(seq, "ALL "); | |
2716 | else | |
2717 | seq_printf(seq, "%04x", ntohs(pt->type)); | |
2718 | ||
2719 | seq_printf(seq, " %-8s ", | |
2720 | pt->dev ? pt->dev->name : ""); | |
2721 | ptype_seq_decode(seq, pt->func); | |
2722 | seq_putc(seq, '\n'); | |
2723 | } | |
2724 | ||
2725 | return 0; | |
2726 | } | |
2727 | ||
2728 | static const struct seq_operations ptype_seq_ops = { | |
2729 | .start = ptype_seq_start, | |
2730 | .next = ptype_seq_next, | |
2731 | .stop = ptype_seq_stop, | |
2732 | .show = ptype_seq_show, | |
2733 | }; | |
2734 | ||
2735 | static int ptype_seq_open(struct inode *inode, struct file *file) | |
2736 | { | |
2feb27db PE |
2737 | return seq_open_net(inode, file, &ptype_seq_ops, |
2738 | sizeof(struct seq_net_private)); | |
0e1256ff SH |
2739 | } |
2740 | ||
2741 | static const struct file_operations ptype_seq_fops = { | |
2742 | .owner = THIS_MODULE, | |
2743 | .open = ptype_seq_open, | |
2744 | .read = seq_read, | |
2745 | .llseek = seq_lseek, | |
2feb27db | 2746 | .release = seq_release_net, |
0e1256ff SH |
2747 | }; |
2748 | ||
2749 | ||
4665079c | 2750 | static int __net_init dev_proc_net_init(struct net *net) |
1da177e4 LT |
2751 | { |
2752 | int rc = -ENOMEM; | |
2753 | ||
881d966b | 2754 | if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) |
1da177e4 | 2755 | goto out; |
881d966b | 2756 | if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) |
1da177e4 | 2757 | goto out_dev; |
881d966b | 2758 | if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) |
457c4cbc | 2759 | goto out_softnet; |
0e1256ff | 2760 | |
881d966b | 2761 | if (wext_proc_init(net)) |
457c4cbc | 2762 | goto out_ptype; |
1da177e4 LT |
2763 | rc = 0; |
2764 | out: | |
2765 | return rc; | |
457c4cbc | 2766 | out_ptype: |
881d966b | 2767 | proc_net_remove(net, "ptype"); |
1da177e4 | 2768 | out_softnet: |
881d966b | 2769 | proc_net_remove(net, "softnet_stat"); |
1da177e4 | 2770 | out_dev: |
881d966b | 2771 | proc_net_remove(net, "dev"); |
1da177e4 LT |
2772 | goto out; |
2773 | } | |
881d966b | 2774 | |
4665079c | 2775 | static void __net_exit dev_proc_net_exit(struct net *net) |
881d966b EB |
2776 | { |
2777 | wext_proc_exit(net); | |
2778 | ||
2779 | proc_net_remove(net, "ptype"); | |
2780 | proc_net_remove(net, "softnet_stat"); | |
2781 | proc_net_remove(net, "dev"); | |
2782 | } | |
2783 | ||
022cbae6 | 2784 | static struct pernet_operations __net_initdata dev_proc_ops = { |
881d966b EB |
2785 | .init = dev_proc_net_init, |
2786 | .exit = dev_proc_net_exit, | |
2787 | }; | |
2788 | ||
2789 | static int __init dev_proc_init(void) | |
2790 | { | |
2791 | return register_pernet_subsys(&dev_proc_ops); | |
2792 | } | |
1da177e4 LT |
2793 | #else |
2794 | #define dev_proc_init() 0 | |
2795 | #endif /* CONFIG_PROC_FS */ | |
2796 | ||
2797 | ||
2798 | /** | |
2799 | * netdev_set_master - set up master/slave pair | |
2800 | * @slave: slave device | |
2801 | * @master: new master device | |
2802 | * | |
2803 | * Changes the master device of the slave. Pass %NULL to break the | |
2804 | * bonding. The caller must hold the RTNL semaphore. On a failure | |
2805 | * a negative errno code is returned. On success the reference counts | |
2806 | * are adjusted, %RTM_NEWLINK is sent to the routing socket and the | |
2807 | * function returns zero. | |
2808 | */ | |
2809 | int netdev_set_master(struct net_device *slave, struct net_device *master) | |
2810 | { | |
2811 | struct net_device *old = slave->master; | |
2812 | ||
2813 | ASSERT_RTNL(); | |
2814 | ||
2815 | if (master) { | |
2816 | if (old) | |
2817 | return -EBUSY; | |
2818 | dev_hold(master); | |
2819 | } | |
2820 | ||
2821 | slave->master = master; | |
4ec93edb | 2822 | |
1da177e4 LT |
2823 | synchronize_net(); |
2824 | ||
2825 | if (old) | |
2826 | dev_put(old); | |
2827 | ||
2828 | if (master) | |
2829 | slave->flags |= IFF_SLAVE; | |
2830 | else | |
2831 | slave->flags &= ~IFF_SLAVE; | |
2832 | ||
2833 | rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); | |
2834 | return 0; | |
2835 | } | |
2836 | ||
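/*
 * Sketch: bonding-style enslave and release, under rtnl as required by
 * the comment above. bond_dev and slave_dev are hypothetical.
 */
#if 0
ASSERT_RTNL();
err = netdev_set_master(slave_dev, bond_dev);	/* enslave */
if (err)
	return err;
/* ... later, to break the bond: */
netdev_set_master(slave_dev, NULL);
#endif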
dad9b335 | 2837 | static int __dev_set_promiscuity(struct net_device *dev, int inc) |
1da177e4 LT |
2838 | { |
2839 | unsigned short old_flags = dev->flags; | |
2840 | ||
24023451 PM |
2841 | ASSERT_RTNL(); |
2842 | ||
dad9b335 WC |
2843 | dev->flags |= IFF_PROMISC; |
2844 | dev->promiscuity += inc; | |
2845 | if (dev->promiscuity == 0) { | |
2846 | /* | |
2847 | * Avoid overflow. | |
2848 | * If inc causes an overflow, leave promiscuity untouched and return an error.
2849 | */ | |
2850 | if (inc < 0) | |
2851 | dev->flags &= ~IFF_PROMISC; | |
2852 | else { | |
2853 | dev->promiscuity -= inc; | |
2854 | printk(KERN_WARNING "%s: promiscuity touches roof, " | |
2855 | "set promiscuity failed, promiscuity feature " | |
2856 | "of device might be broken.\n", dev->name); | |
2857 | return -EOVERFLOW; | |
2858 | } | |
2859 | } | |
52609c0b | 2860 | if (dev->flags != old_flags) { |
1da177e4 LT |
2861 | printk(KERN_INFO "device %s %s promiscuous mode\n", |
2862 | dev->name, (dev->flags & IFF_PROMISC) ? "entered" : | |
4ec93edb | 2863 | "left"); |
7759db82 KHK |
2864 | if (audit_enabled) |
2865 | audit_log(current->audit_context, GFP_ATOMIC, | |
2866 | AUDIT_ANOM_PROMISCUOUS, | |
2867 | "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", | |
2868 | dev->name, (dev->flags & IFF_PROMISC), | |
2869 | (old_flags & IFF_PROMISC), | |
2870 | audit_get_loginuid(current), | |
2871 | current->uid, current->gid, | |
2872 | audit_get_sessionid(current)); | |
24023451 PM |
2873 | |
2874 | if (dev->change_rx_flags) | |
2875 | dev->change_rx_flags(dev, IFF_PROMISC); | |
1da177e4 | 2876 | } |
dad9b335 | 2877 | return 0; |
1da177e4 LT |
2878 | } |
2879 | ||
4417da66 PM |
2880 | /** |
2881 | * dev_set_promiscuity - update promiscuity count on a device | |
2882 | * @dev: device | |
2883 | * @inc: modifier | |
2884 | * | |
2885 | * Add or remove promiscuity from a device. While the count in the device | |
2886 | * remains above zero the interface remains promiscuous. Once it hits zero | |
2887 | * the device reverts back to normal filtering operation. A negative inc | |
2888 | * value is used to drop promiscuity on the device. | |
dad9b335 | 2889 | * Return 0 if successful or a negative errno code on error. |
4417da66 | 2890 | */ |
dad9b335 | 2891 | int dev_set_promiscuity(struct net_device *dev, int inc) |
4417da66 PM |
2892 | { |
2893 | unsigned short old_flags = dev->flags; | |
dad9b335 | 2894 | int err; |
4417da66 | 2895 | |
dad9b335 | 2896 | err = __dev_set_promiscuity(dev, inc); |
4b5a698e | 2897 | if (err < 0) |
dad9b335 | 2898 | return err; |
4417da66 PM |
2899 | if (dev->flags != old_flags) |
2900 | dev_set_rx_mode(dev); | |
dad9b335 | 2901 | return err; |
4417da66 PM |
2902 | } |
2903 | ||
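/*
 * Sketch: a layered device (a bridge port, say) takes one promiscuity
 * reference while attached and drops it on detach; the increments must
 * balance. Runs under rtnl; port_dev is hypothetical.
 */
#if 0
rtnl_lock();
dev_set_promiscuity(port_dev, 1);	/* attach: enter promiscuous mode */
/* ... */
dev_set_promiscuity(port_dev, -1);	/* detach: drop our reference */
rtnl_unlock();
#endif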
1da177e4 LT |
2904 | /** |
2905 | * dev_set_allmulti - update allmulti count on a device | |
2906 | * @dev: device | |
2907 | * @inc: modifier | |
2908 | * | |
2909 | * Add or remove reception of all multicast frames to a device. While the | |
2910 | * count in the device remains above zero the interface remains listening | |
2911 | * to all multicast frames. Once it hits zero the device reverts back to normal
2912 | * filtering operation. A negative @inc value is used to drop the counter | |
2913 | * when releasing a resource needing all multicasts. | |
dad9b335 | 2914 | * Return 0 if successful or a negative errno code on error. |
1da177e4 LT |
2915 | */ |
2916 | ||
dad9b335 | 2917 | int dev_set_allmulti(struct net_device *dev, int inc) |
1da177e4 LT |
2918 | { |
2919 | unsigned short old_flags = dev->flags; | |
2920 | ||
24023451 PM |
2921 | ASSERT_RTNL(); |
2922 | ||
1da177e4 | 2923 | dev->flags |= IFF_ALLMULTI; |
dad9b335 WC |
2924 | dev->allmulti += inc; |
2925 | if (dev->allmulti == 0) { | |
2926 | /* | |
2927 | * Avoid overflow. | |
2928 | * If inc causes an overflow, leave allmulti untouched and return an error.
2929 | */ | |
2930 | if (inc < 0) | |
2931 | dev->flags &= ~IFF_ALLMULTI; | |
2932 | else { | |
2933 | dev->allmulti -= inc; | |
2934 | printk(KERN_WARNING "%s: allmulti touches roof, " | |
2935 | "set allmulti failed, allmulti feature of " | |
2936 | "device might be broken.\n", dev->name); | |
2937 | return -EOVERFLOW; | |
2938 | } | |
2939 | } | |
24023451 PM |
2940 | if (dev->flags ^ old_flags) { |
2941 | if (dev->change_rx_flags) | |
2942 | dev->change_rx_flags(dev, IFF_ALLMULTI); | |
4417da66 | 2943 | dev_set_rx_mode(dev); |
24023451 | 2944 | } |
dad9b335 | 2945 | return 0; |
4417da66 PM |
2946 | } |
2947 | ||
2948 | /* | |
2949 | * Upload unicast and multicast address lists to device and | |
2950 | * configure RX filtering. When the device doesn't support unicast | |
53ccaae1 | 2951 | * filtering it is put in promiscuous mode while unicast addresses |
4417da66 PM |
2952 | * are present. |
2953 | */ | |
2954 | void __dev_set_rx_mode(struct net_device *dev) | |
2955 | { | |
2956 | /* dev_open will call this function so the list will stay sane. */ | |
2957 | if (!(dev->flags&IFF_UP)) | |
2958 | return; | |
2959 | ||
2960 | if (!netif_device_present(dev)) | |
40b77c94 | 2961 | return; |
4417da66 PM |
2962 | |
2963 | if (dev->set_rx_mode) | |
2964 | dev->set_rx_mode(dev); | |
2965 | else { | |
2966 | /* Unicast address changes may only happen under the rtnl,
2967 | * therefore calling __dev_set_promiscuity here is safe. | |
2968 | */ | |
2969 | if (dev->uc_count > 0 && !dev->uc_promisc) { | |
2970 | __dev_set_promiscuity(dev, 1); | |
2971 | dev->uc_promisc = 1; | |
2972 | } else if (dev->uc_count == 0 && dev->uc_promisc) { | |
2973 | __dev_set_promiscuity(dev, -1); | |
2974 | dev->uc_promisc = 0; | |
2975 | } | |
2976 | ||
2977 | if (dev->set_multicast_list) | |
2978 | dev->set_multicast_list(dev); | |
2979 | } | |
2980 | } | |
2981 | ||
2982 | void dev_set_rx_mode(struct net_device *dev) | |
2983 | { | |
b9e40857 | 2984 | netif_addr_lock_bh(dev); |
4417da66 | 2985 | __dev_set_rx_mode(dev); |
b9e40857 | 2986 | netif_addr_unlock_bh(dev); |
1da177e4 LT |
2987 | } |
2988 | ||
61cbc2fc PM |
2989 | int __dev_addr_delete(struct dev_addr_list **list, int *count, |
2990 | void *addr, int alen, int glbl) | |
bf742482 PM |
2991 | { |
2992 | struct dev_addr_list *da; | |
2993 | ||
2994 | for (; (da = *list) != NULL; list = &da->next) { | |
2995 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | |
2996 | alen == da->da_addrlen) { | |
2997 | if (glbl) { | |
2998 | int old_glbl = da->da_gusers; | |
2999 | da->da_gusers = 0; | |
3000 | if (old_glbl == 0) | |
3001 | break; | |
3002 | } | |
3003 | if (--da->da_users) | |
3004 | return 0; | |
3005 | ||
3006 | *list = da->next; | |
3007 | kfree(da); | |
61cbc2fc | 3008 | (*count)--; |
bf742482 PM |
3009 | return 0; |
3010 | } | |
3011 | } | |
3012 | return -ENOENT; | |
3013 | } | |
3014 | ||
61cbc2fc PM |
3015 | int __dev_addr_add(struct dev_addr_list **list, int *count, |
3016 | void *addr, int alen, int glbl) | |
bf742482 PM |
3017 | { |
3018 | struct dev_addr_list *da; | |
3019 | ||
3020 | for (da = *list; da != NULL; da = da->next) { | |
3021 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | |
3022 | da->da_addrlen == alen) { | |
3023 | if (glbl) { | |
3024 | int old_glbl = da->da_gusers; | |
3025 | da->da_gusers = 1; | |
3026 | if (old_glbl) | |
3027 | return 0; | |
3028 | } | |
3029 | da->da_users++; | |
3030 | return 0; | |
3031 | } | |
3032 | } | |
3033 | ||
12aa343a | 3034 | da = kzalloc(sizeof(*da), GFP_ATOMIC); |
bf742482 PM |
3035 | if (da == NULL) |
3036 | return -ENOMEM; | |
3037 | memcpy(da->da_addr, addr, alen); | |
3038 | da->da_addrlen = alen; | |
3039 | da->da_users = 1; | |
3040 | da->da_gusers = glbl ? 1 : 0; | |
3041 | da->next = *list; | |
3042 | *list = da; | |
61cbc2fc | 3043 | (*count)++; |
bf742482 PM |
3044 | return 0; |
3045 | } | |
3046 | ||
4417da66 PM |
3047 | /** |
3048 | * dev_unicast_delete - Release secondary unicast address. | |
3049 | * @dev: device | |
0ed72ec4 RD |
3050 | * @addr: address to delete |
3051 | * @alen: length of @addr | |
4417da66 PM |
3052 | * |
3053 | * Release reference to a secondary unicast address and remove it | |
0ed72ec4 | 3054 | * from the device if the reference count drops to zero. |
4417da66 PM |
3055 | * |
3056 | * The caller must hold the rtnl_mutex. | |
3057 | */ | |
3058 | int dev_unicast_delete(struct net_device *dev, void *addr, int alen) | |
3059 | { | |
3060 | int err; | |
3061 | ||
3062 | ASSERT_RTNL(); | |
3063 | ||
b9e40857 | 3064 | netif_addr_lock_bh(dev); |
61cbc2fc PM |
3065 | err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
3066 | if (!err) | |
4417da66 | 3067 | __dev_set_rx_mode(dev); |
b9e40857 | 3068 | netif_addr_unlock_bh(dev); |
4417da66 PM |
3069 | return err; |
3070 | } | |
3071 | EXPORT_SYMBOL(dev_unicast_delete); | |
3072 | ||
3073 | /** | |
3074 | * dev_unicast_add - add a secondary unicast address | |
3075 | * @dev: device | |
5dbaec5d | 3076 | * @addr: address to add |
0ed72ec4 | 3077 | * @alen: length of @addr |
4417da66 PM |
3078 | * |
3079 | * Add a secondary unicast address to the device or increase | |
3080 | * the reference count if it already exists. | |
3081 | * | |
3082 | * The caller must hold the rtnl_mutex. | |
3083 | */ | |
3084 | int dev_unicast_add(struct net_device *dev, void *addr, int alen) | |
3085 | { | |
3086 | int err; | |
3087 | ||
3088 | ASSERT_RTNL(); | |
3089 | ||
b9e40857 | 3090 | netif_addr_lock_bh(dev); |
61cbc2fc PM |
3091 | err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); |
3092 | if (!err) | |
4417da66 | 3093 | __dev_set_rx_mode(dev); |
b9e40857 | 3094 | netif_addr_unlock_bh(dev); |
4417da66 PM |
3095 | return err; |
3096 | } | |
3097 | EXPORT_SYMBOL(dev_unicast_add); | |
3098 | ||
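/*
 * Sketch: a virtual device (macvlan is the in-tree user) installs its
 * own MAC as a secondary unicast address on the lower device so frames
 * for it pass the hardware filter, and removes it symmetrically on
 * close. lowerdev here is hypothetical.
 */
#if 0
ASSERT_RTNL();
err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
if (err < 0)
	return err;
/* ... and on close: */
dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
#endif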
e83a2ea8 CL |
3099 | int __dev_addr_sync(struct dev_addr_list **to, int *to_count, |
3100 | struct dev_addr_list **from, int *from_count) | |
3101 | { | |
3102 | struct dev_addr_list *da, *next; | |
3103 | int err = 0; | |
3104 | ||
3105 | da = *from; | |
3106 | while (da != NULL) { | |
3107 | next = da->next; | |
3108 | if (!da->da_synced) { | |
3109 | err = __dev_addr_add(to, to_count, | |
3110 | da->da_addr, da->da_addrlen, 0); | |
3111 | if (err < 0) | |
3112 | break; | |
3113 | da->da_synced = 1; | |
3114 | da->da_users++; | |
3115 | } else if (da->da_users == 1) { | |
3116 | __dev_addr_delete(to, to_count, | |
3117 | da->da_addr, da->da_addrlen, 0); | |
3118 | __dev_addr_delete(from, from_count, | |
3119 | da->da_addr, da->da_addrlen, 0); | |
3120 | } | |
3121 | da = next; | |
3122 | } | |
3123 | return err; | |
3124 | } | |
3125 | ||
3126 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | |
3127 | struct dev_addr_list **from, int *from_count) | |
3128 | { | |
3129 | struct dev_addr_list *da, *next; | |
3130 | ||
3131 | da = *from; | |
3132 | while (da != NULL) { | |
3133 | next = da->next; | |
3134 | if (da->da_synced) { | |
3135 | __dev_addr_delete(to, to_count, | |
3136 | da->da_addr, da->da_addrlen, 0); | |
3137 | da->da_synced = 0; | |
3138 | __dev_addr_delete(from, from_count, | |
3139 | da->da_addr, da->da_addrlen, 0); | |
3140 | } | |
3141 | da = next; | |
3142 | } | |
3143 | } | |
3144 | ||
3145 | /** | |
3146 | * dev_unicast_sync - Synchronize device's unicast list to another device | |
3147 | * @to: destination device | |
3148 | * @from: source device | |
3149 | * | |
3150 | * Add newly added addresses to the destination device and release | |
3151 | * addresses that have no users left. The source device must be | |
3152 | * locked by netif_tx_lock_bh. | |
3153 | * | |
3154 | * This function is intended to be called from the dev->set_rx_mode | |
3155 | * function of layered software devices. | |
3156 | */ | |
3157 | int dev_unicast_sync(struct net_device *to, struct net_device *from) | |
3158 | { | |
3159 | int err = 0; | |
3160 | ||
b9e40857 | 3161 | netif_addr_lock_bh(to); |
e83a2ea8 CL |
3162 | err = __dev_addr_sync(&to->uc_list, &to->uc_count, |
3163 | &from->uc_list, &from->uc_count); | |
3164 | if (!err) | |
3165 | __dev_set_rx_mode(to); | |
b9e40857 | 3166 | netif_addr_unlock_bh(to); |
e83a2ea8 CL |
3167 | return err; |
3168 | } | |
3169 | EXPORT_SYMBOL(dev_unicast_sync); | |
3170 | ||
3171 | /** | |
bc2cda1e | 3172 | * dev_unicast_unsync - Remove synchronized addresses from the destination device |
e83a2ea8 CL |
3173 | * @to: destination device |
3174 | * @from: source device | |
3175 | * | |
3176 | * Remove all addresses that were added to the destination device by | |
3177 | * dev_unicast_sync(). This function is intended to be called from the | |
3178 | * dev->stop function of layered software devices. | |
3179 | */ | |
3180 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | |
3181 | { | |
b9e40857 | 3182 | netif_addr_lock_bh(from); |
e308a5d8 | 3183 | netif_addr_lock(to); |
e83a2ea8 CL |
3184 | |
3185 | __dev_addr_unsync(&to->uc_list, &to->uc_count, | |
3186 | &from->uc_list, &from->uc_count); | |
3187 | __dev_set_rx_mode(to); | |
3188 | ||
e308a5d8 | 3189 | netif_addr_unlock(to); |
b9e40857 | 3190 | netif_addr_unlock_bh(from); |
e83a2ea8 CL |
3191 | } |
3192 | EXPORT_SYMBOL(dev_unicast_unsync); | |
3193 | ||
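/*
 * Usage sketch (illustrative): a layered device keeps its lower device
 * in step by calling dev_unicast_sync() from its set_rx_mode handler
 * and dev_unicast_unsync() from its stop handler. "vlan_dev",
 * "real_dev" and get_lower_dev() are hypothetical names for the upper
 * device, the lower device and a lookup helper.
 *
 *	static void vlan_set_rx_mode(struct net_device *vlan_dev)
 *	{
 *		struct net_device *real_dev = get_lower_dev(vlan_dev);
 *		dev_unicast_sync(real_dev, vlan_dev);
 *	}
 *
 *	static int vlan_stop(struct net_device *vlan_dev)
 *	{
 *		struct net_device *real_dev = get_lower_dev(vlan_dev);
 *		dev_unicast_unsync(real_dev, vlan_dev);
 *		return 0;
 *	}
 */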
12972621 DC |
3194 | static void __dev_addr_discard(struct dev_addr_list **list) |
3195 | { | |
3196 | struct dev_addr_list *tmp; | |
3197 | ||
3198 | while (*list != NULL) { | |
3199 | tmp = *list; | |
3200 | *list = tmp->next; | |
3201 | if (tmp->da_users > tmp->da_gusers) | |
3202 | printk(KERN_WARNING "__dev_addr_discard: address leakage! " | |
3203 | "da_users=%d\n", tmp->da_users); | |
3204 | kfree(tmp); | |
3205 | } | |
3206 | } | |
3207 | ||
26cc2522 | 3208 | static void dev_addr_discard(struct net_device *dev) |
4417da66 | 3209 | { |
b9e40857 | 3210 | netif_addr_lock_bh(dev); |
26cc2522 | 3211 | |
4417da66 PM |
3212 | __dev_addr_discard(&dev->uc_list); |
3213 | dev->uc_count = 0; | |
4417da66 | 3214 | |
456ad75c DC |
3215 | __dev_addr_discard(&dev->mc_list); |
3216 | dev->mc_count = 0; | |
26cc2522 | 3217 | |
b9e40857 | 3218 | netif_addr_unlock_bh(dev); |
456ad75c DC |
3219 | } |
3220 | ||
1da177e4 LT |
3221 | unsigned dev_get_flags(const struct net_device *dev) |
3222 | { | |
3223 | unsigned flags; | |
3224 | ||
3225 | flags = (dev->flags & ~(IFF_PROMISC | | |
3226 | IFF_ALLMULTI | | |
b00055aa SR |
3227 | IFF_RUNNING | |
3228 | IFF_LOWER_UP | | |
3229 | IFF_DORMANT)) | | |
1da177e4 LT |
3230 | (dev->gflags & (IFF_PROMISC | |
3231 | IFF_ALLMULTI)); | |
3232 | ||
b00055aa SR |
3233 | if (netif_running(dev)) { |
3234 | if (netif_oper_up(dev)) | |
3235 | flags |= IFF_RUNNING; | |
3236 | if (netif_carrier_ok(dev)) | |
3237 | flags |= IFF_LOWER_UP; | |
3238 | if (netif_dormant(dev)) | |
3239 | flags |= IFF_DORMANT; | |
3240 | } | |
1da177e4 LT |
3241 | |
3242 | return flags; | |
3243 | } | |
3244 | ||
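/*
 * Usage sketch (illustrative): dev_get_flags() folds the user-visible
 * IFF_PROMISC/IFF_ALLMULTI bits kept in gflags together with the live
 * RUNNING, LOWER_UP and DORMANT state, which is why SIOCGIFFLAGS can
 * report IFF_RUNNING without the bit being stored in dev->flags.
 *
 *	unsigned flags = dev_get_flags(dev);
 *
 *	if (flags & IFF_RUNNING)
 *		printk(KERN_DEBUG "%s: operationally up\n", dev->name);
 *	if (flags & IFF_LOWER_UP)
 *		printk(KERN_DEBUG "%s: carrier present\n", dev->name);
 */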
3245 | int dev_change_flags(struct net_device *dev, unsigned flags) | |
3246 | { | |
7c355f53 | 3247 | int ret, changes; |
1da177e4 LT |
3248 | int old_flags = dev->flags; |
3249 | ||
24023451 PM |
3250 | ASSERT_RTNL(); |
3251 | ||
1da177e4 LT |
3252 | /* |
3253 | * Set the flags on our device. | |
3254 | */ | |
3255 | ||
3256 | dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | | |
3257 | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | | |
3258 | IFF_AUTOMEDIA)) | | |
3259 | (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | | |
3260 | IFF_ALLMULTI)); | |
3261 | ||
3262 | /* | |
3263 | * Load in the correct multicast list now the flags have changed. | |
3264 | */ | |
3265 | ||
0e91796e | 3266 | if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST) |
24023451 PM |
3267 | dev->change_rx_flags(dev, IFF_MULTICAST); |
3268 | ||
4417da66 | 3269 | dev_set_rx_mode(dev); |
1da177e4 LT |
3270 | |
3271 | /* | |
3272 | * Have we downed the interface? We handle IFF_UP ourselves | |
3273 | * according to user attempts to set it, rather than blindly | |
3274 | * setting it. | |
3275 | */ | |
3276 | ||
3277 | ret = 0; | |
3278 | if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ | |
3279 | ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); | |
3280 | ||
3281 | if (!ret) | |
4417da66 | 3282 | dev_set_rx_mode(dev); |
1da177e4 LT |
3283 | } |
3284 | ||
3285 | if (dev->flags & IFF_UP && | |
3286 | ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | |
3287 | IFF_VOLATILE))) | |
056925ab | 3288 | call_netdevice_notifiers(NETDEV_CHANGE, dev); |
1da177e4 LT |
3289 | |
3290 | if ((flags ^ dev->gflags) & IFF_PROMISC) { | |
3291 | int inc = (flags & IFF_PROMISC) ? +1 : -1; | |
3292 | dev->gflags ^= IFF_PROMISC; | |
3293 | dev_set_promiscuity(dev, inc); | |
3294 | } | |
3295 | ||
3296 | /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI | |
3297 | is important. Some (broken) drivers set IFF_PROMISC when | |
3298 | IFF_ALLMULTI is requested, without asking us and without reporting. | |
3299 | */ | |
3300 | if ((flags ^ dev->gflags) & IFF_ALLMULTI) { | |
3301 | int inc = (flags & IFF_ALLMULTI) ? +1 : -1; | |
3302 | dev->gflags ^= IFF_ALLMULTI; | |
3303 | dev_set_allmulti(dev, inc); | |
3304 | } | |
3305 | ||
7c355f53 TG |
3306 | /* Exclude state transition flags, already notified */ |
3307 | changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING); | |
3308 | if (changes) | |
3309 | rtmsg_ifinfo(RTM_NEWLINK, dev, changes); | |
1da177e4 LT |
3310 | |
3311 | return ret; | |
3312 | } | |
3313 | ||
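/*
 * Usage sketch (illustrative): bringing an interface up and marking it
 * promiscuous through the same path SIOCSIFFLAGS takes. Reading the
 * current flags first mirrors the get-modify-set pattern user space
 * uses; the IFF_PROMISC change is routed through dev_set_promiscuity()
 * so the reference count stays balanced.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev,
 *			       dev_get_flags(dev) | IFF_UP | IFF_PROMISC);
 *	rtnl_unlock();
 */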
3314 | int dev_set_mtu(struct net_device *dev, int new_mtu) | |
3315 | { | |
3316 | int err; | |
3317 | ||
3318 | if (new_mtu == dev->mtu) | |
3319 | return 0; | |
3320 | ||
3321 | /* MTU must be positive. */ | |
3322 | if (new_mtu < 0) | |
3323 | return -EINVAL; | |
3324 | ||
3325 | if (!netif_device_present(dev)) | |
3326 | return -ENODEV; | |
3327 | ||
3328 | err = 0; | |
3329 | if (dev->change_mtu) | |
3330 | err = dev->change_mtu(dev, new_mtu); | |
3331 | else | |
3332 | dev->mtu = new_mtu; | |
3333 | if (!err && dev->flags & IFF_UP) | |
056925ab | 3334 | call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); |
1da177e4 LT |
3335 | return err; |
3336 | } | |
3337 | ||
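/*
 * Usage sketch (illustrative): dev_set_mtu() delegates any validation
 * beyond "new_mtu >= 0" to the driver's change_mtu hook, and a running
 * device gets a NETDEV_CHANGEMTU notification on success. Callers such
 * as the SIOCSIFMTU path hold the RTNL around the change.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);	(jumbo frames, if the driver agrees)
 *	rtnl_unlock();
 */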
3338 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | |
3339 | { | |
3340 | int err; | |
3341 | ||
3342 | if (!dev->set_mac_address) | |
3343 | return -EOPNOTSUPP; | |
3344 | if (sa->sa_family != dev->type) | |
3345 | return -EINVAL; | |
3346 | if (!netif_device_present(dev)) | |
3347 | return -ENODEV; | |
3348 | err = dev->set_mac_address(dev, sa); | |
3349 | if (!err) | |
056925ab | 3350 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
1da177e4 LT |
3351 | return err; |
3352 | } | |
3353 | ||
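/*
 * Usage sketch (illustrative, with a made-up locally administered
 * address): the sockaddr family must match dev->type, e.g. ARPHRD_ETHER
 * for Ethernet, mirroring what the SIOCSIFHWADDR ioctl passes down.
 *
 *	struct sockaddr sa;
 *	u8 mac[ETH_ALEN] = { 0x02, 0xab, 0xcd, 0xef, 0x01, 0x02 };
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */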
3354 | /* | |
14e3e079 | 3355 | * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) |
1da177e4 | 3356 | */ |
14e3e079 | 3357 | static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) |
1da177e4 LT |
3358 | { |
3359 | int err; | |
881d966b | 3360 | struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); |
1da177e4 LT |
3361 | |
3362 | if (!dev) | |
3363 | return -ENODEV; | |
3364 | ||
3365 | switch (cmd) { | |
3366 | case SIOCGIFFLAGS: /* Get interface flags */ | |
3367 | ifr->ifr_flags = dev_get_flags(dev); | |
3368 | return 0; | |
3369 | ||
1da177e4 LT |
3370 | case SIOCGIFMETRIC: /* Get the metric on the interface |
3371 | (currently unused) */ | |
3372 | ifr->ifr_metric = 0; | |
3373 | return 0; | |
3374 | ||
1da177e4 LT |
3375 | case SIOCGIFMTU: /* Get the MTU of a device */ |
3376 | ifr->ifr_mtu = dev->mtu; | |
3377 | return 0; | |
3378 | ||
1da177e4 LT |
3379 | case SIOCGIFHWADDR: |
3380 | if (!dev->addr_len) | |
3381 | memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); | |
3382 | else | |
3383 | memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, | |
3384 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | |
3385 | ifr->ifr_hwaddr.sa_family = dev->type; | |
3386 | return 0; | |
3387 | ||
14e3e079 JG |
3388 | case SIOCGIFSLAVE: |
3389 | err = -EINVAL; | |
3390 | break; | |
3391 | ||
3392 | case SIOCGIFMAP: | |
3393 | ifr->ifr_map.mem_start = dev->mem_start; | |
3394 | ifr->ifr_map.mem_end = dev->mem_end; | |
3395 | ifr->ifr_map.base_addr = dev->base_addr; | |
3396 | ifr->ifr_map.irq = dev->irq; | |
3397 | ifr->ifr_map.dma = dev->dma; | |
3398 | ifr->ifr_map.port = dev->if_port; | |
3399 | return 0; | |
3400 | ||
3401 | case SIOCGIFINDEX: | |
3402 | ifr->ifr_ifindex = dev->ifindex; | |
3403 | return 0; | |
3404 | ||
3405 | case SIOCGIFTXQLEN: | |
3406 | ifr->ifr_qlen = dev->tx_queue_len; | |
3407 | return 0; | |
3408 | ||
3409 | default: | |
3410 | /* dev_ioctl() should ensure this case | |
3411 | * is never reached | |
3412 | */ | |
3413 | WARN_ON(1); | |
3414 | err = -EINVAL; | |
3415 | break; | |
3416 | ||
3417 | } | |
3418 | return err; | |
3419 | } | |
3420 | ||
3421 | /* | |
3422 | * Perform the SIOCxIFxxx calls, inside rtnl_lock() | |
3423 | */ | |
3424 | static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |
3425 | { | |
3426 | int err; | |
3427 | struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); | |
3428 | ||
3429 | if (!dev) | |
3430 | return -ENODEV; | |
3431 | ||
3432 | switch (cmd) { | |
3433 | case SIOCSIFFLAGS: /* Set interface flags */ | |
3434 | return dev_change_flags(dev, ifr->ifr_flags); | |
3435 | ||
3436 | case SIOCSIFMETRIC: /* Set the metric on the interface | |
3437 | (currently unused) */ | |
3438 | return -EOPNOTSUPP; | |
3439 | ||
3440 | case SIOCSIFMTU: /* Set the MTU of a device */ | |
3441 | return dev_set_mtu(dev, ifr->ifr_mtu); | |
3442 | ||
1da177e4 LT |
3443 | case SIOCSIFHWADDR: |
3444 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); | |
3445 | ||
3446 | case SIOCSIFHWBROADCAST: | |
3447 | if (ifr->ifr_hwaddr.sa_family != dev->type) | |
3448 | return -EINVAL; | |
3449 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, | |
3450 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | |
056925ab | 3451 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
1da177e4 LT |
3452 | return 0; |
3453 | ||
1da177e4 LT |
3454 | case SIOCSIFMAP: |
3455 | if (dev->set_config) { | |
3456 | if (!netif_device_present(dev)) | |
3457 | return -ENODEV; | |
3458 | return dev->set_config(dev, &ifr->ifr_map); | |
3459 | } | |
3460 | return -EOPNOTSUPP; | |
3461 | ||
3462 | case SIOCADDMULTI: | |
61ee6bd4 | 3463 | if ((!dev->set_multicast_list && !dev->set_rx_mode) || |
1da177e4 LT |
3464 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) |
3465 | return -EINVAL; | |
3466 | if (!netif_device_present(dev)) | |
3467 | return -ENODEV; | |
3468 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | |
3469 | dev->addr_len, 1); | |
3470 | ||
3471 | case SIOCDELMULTI: | |
61ee6bd4 | 3472 | if ((!dev->set_multicast_list && !dev->set_rx_mode) || |
1da177e4 LT |
3473 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) |
3474 | return -EINVAL; | |
3475 | if (!netif_device_present(dev)) | |
3476 | return -ENODEV; | |
3477 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | |
3478 | dev->addr_len, 1); | |
3479 | ||
1da177e4 LT |
3480 | case SIOCSIFTXQLEN: |
3481 | if (ifr->ifr_qlen < 0) | |
3482 | return -EINVAL; | |
3483 | dev->tx_queue_len = ifr->ifr_qlen; | |
3484 | return 0; | |
3485 | ||
3486 | case SIOCSIFNAME: | |
3487 | ifr->ifr_newname[IFNAMSIZ-1] = '\0'; | |
3488 | return dev_change_name(dev, ifr->ifr_newname); | |
3489 | ||
3490 | /* | |
3491 | * Unknown or private ioctl | |
3492 | */ | |
3493 | ||
3494 | default: | |
3495 | if ((cmd >= SIOCDEVPRIVATE && | |
3496 | cmd <= SIOCDEVPRIVATE + 15) || | |
3497 | cmd == SIOCBONDENSLAVE || | |
3498 | cmd == SIOCBONDRELEASE || | |
3499 | cmd == SIOCBONDSETHWADDR || | |
3500 | cmd == SIOCBONDSLAVEINFOQUERY || | |
3501 | cmd == SIOCBONDINFOQUERY || | |
3502 | cmd == SIOCBONDCHANGEACTIVE || | |
3503 | cmd == SIOCGMIIPHY || | |
3504 | cmd == SIOCGMIIREG || | |
3505 | cmd == SIOCSMIIREG || | |
3506 | cmd == SIOCBRADDIF || | |
3507 | cmd == SIOCBRDELIF || | |
3508 | cmd == SIOCWANDEV) { | |
3509 | err = -EOPNOTSUPP; | |
3510 | if (dev->do_ioctl) { | |
3511 | if (netif_device_present(dev)) | |
3512 | err = dev->do_ioctl(dev, ifr, | |
3513 | cmd); | |
3514 | else | |
3515 | err = -ENODEV; | |
3516 | } | |
3517 | } else | |
3518 | err = -EINVAL; | |
3519 | ||
3520 | } | |
3521 | return err; | |
3522 | } | |
3523 | ||
3524 | /* | |
3525 | * This function handles all "interface"-type I/O control requests. The actual | |
3526 | * 'doing' part of this is dev_ifsioc above. | |
3527 | */ | |
3528 | ||
3529 | /** | |
3530 | * dev_ioctl - network device ioctl | |
c4ea43c5 | 3531 | * @net: the applicable net namespace |
1da177e4 LT |
3532 | * @cmd: command to issue |
3533 | * @arg: pointer to a struct ifreq in user space | |
3534 | * | |
3535 | * Issue ioctl functions to devices. This is normally called by the | |
3536 | * user space syscall interfaces but can sometimes be useful for | |
3537 | * other purposes. The return value is the return from the syscall if | |
3538 | * positive or a negative errno code on error. | |
3539 | */ | |
3540 | ||
881d966b | 3541 | int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) |
1da177e4 LT |
3542 | { |
3543 | struct ifreq ifr; | |
3544 | int ret; | |
3545 | char *colon; | |
3546 | ||
3547 | /* One special case: SIOCGIFCONF takes an ifconf argument | |
3548 | and requires the shared lock, because it sleeps writing | |
3549 | to user space. | |
3550 | */ | |
3551 | ||
3552 | if (cmd == SIOCGIFCONF) { | |
6756ae4b | 3553 | rtnl_lock(); |
881d966b | 3554 | ret = dev_ifconf(net, (char __user *) arg); |
6756ae4b | 3555 | rtnl_unlock(); |
1da177e4 LT |
3556 | return ret; |
3557 | } | |
3558 | if (cmd == SIOCGIFNAME) | |
881d966b | 3559 | return dev_ifname(net, (struct ifreq __user *)arg); |
1da177e4 LT |
3560 | |
3561 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | |
3562 | return -EFAULT; | |
3563 | ||
3564 | ifr.ifr_name[IFNAMSIZ-1] = 0; | |
3565 | ||
3566 | colon = strchr(ifr.ifr_name, ':'); | |
3567 | if (colon) | |
3568 | *colon = 0; | |
3569 | ||
3570 | /* | |
3571 | * See which interface the caller is talking about. | |
3572 | */ | |
3573 | ||
3574 | switch (cmd) { | |
3575 | /* | |
3576 | * These ioctl calls: | |
3577 | * - can be done by all. | |
3578 | * - atomic and do not require locking. | |
3579 | * - return a value | |
3580 | */ | |
3581 | case SIOCGIFFLAGS: | |
3582 | case SIOCGIFMETRIC: | |
3583 | case SIOCGIFMTU: | |
3584 | case SIOCGIFHWADDR: | |
3585 | case SIOCGIFSLAVE: | |
3586 | case SIOCGIFMAP: | |
3587 | case SIOCGIFINDEX: | |
3588 | case SIOCGIFTXQLEN: | |
881d966b | 3589 | dev_load(net, ifr.ifr_name); |
1da177e4 | 3590 | read_lock(&dev_base_lock); |
14e3e079 | 3591 | ret = dev_ifsioc_locked(net, &ifr, cmd); |
1da177e4 LT |
3592 | read_unlock(&dev_base_lock); |
3593 | if (!ret) { | |
3594 | if (colon) | |
3595 | *colon = ':'; | |
3596 | if (copy_to_user(arg, &ifr, | |
3597 | sizeof(struct ifreq))) | |
3598 | ret = -EFAULT; | |
3599 | } | |
3600 | return ret; | |
3601 | ||
3602 | case SIOCETHTOOL: | |
881d966b | 3603 | dev_load(net, ifr.ifr_name); |
1da177e4 | 3604 | rtnl_lock(); |
881d966b | 3605 | ret = dev_ethtool(net, &ifr); |
1da177e4 LT |
3606 | rtnl_unlock(); |
3607 | if (!ret) { | |
3608 | if (colon) | |
3609 | *colon = ':'; | |
3610 | if (copy_to_user(arg, &ifr, | |
3611 | sizeof(struct ifreq))) | |
3612 | ret = -EFAULT; | |
3613 | } | |
3614 | return ret; | |
3615 | ||
3616 | /* | |
3617 | * These ioctl calls: | |
3618 | * - require superuser power. | |
3619 | * - require strict serialization. | |
3620 | * - return a value | |
3621 | */ | |
3622 | case SIOCGMIIPHY: | |
3623 | case SIOCGMIIREG: | |
3624 | case SIOCSIFNAME: | |
3625 | if (!capable(CAP_NET_ADMIN)) | |
3626 | return -EPERM; | |
881d966b | 3627 | dev_load(net, ifr.ifr_name); |
1da177e4 | 3628 | rtnl_lock(); |
881d966b | 3629 | ret = dev_ifsioc(net, &ifr, cmd); |
1da177e4 LT |
3630 | rtnl_unlock(); |
3631 | if (!ret) { | |
3632 | if (colon) | |
3633 | *colon = ':'; | |
3634 | if (copy_to_user(arg, &ifr, | |
3635 | sizeof(struct ifreq))) | |
3636 | ret = -EFAULT; | |
3637 | } | |
3638 | return ret; | |
3639 | ||
3640 | /* | |
3641 | * These ioctl calls: | |
3642 | * - require superuser power. | |
3643 | * - require strict serialization. | |
3644 | * - do not return a value | |
3645 | */ | |
3646 | case SIOCSIFFLAGS: | |
3647 | case SIOCSIFMETRIC: | |
3648 | case SIOCSIFMTU: | |
3649 | case SIOCSIFMAP: | |
3650 | case SIOCSIFHWADDR: | |
3651 | case SIOCSIFSLAVE: | |
3652 | case SIOCADDMULTI: | |
3653 | case SIOCDELMULTI: | |
3654 | case SIOCSIFHWBROADCAST: | |
3655 | case SIOCSIFTXQLEN: | |
3656 | case SIOCSMIIREG: | |
3657 | case SIOCBONDENSLAVE: | |
3658 | case SIOCBONDRELEASE: | |
3659 | case SIOCBONDSETHWADDR: | |
1da177e4 LT |
3660 | case SIOCBONDCHANGEACTIVE: |
3661 | case SIOCBRADDIF: | |
3662 | case SIOCBRDELIF: | |
3663 | if (!capable(CAP_NET_ADMIN)) | |
3664 | return -EPERM; | |
cabcac0b TG |
3665 | /* fall through */ |
3666 | case SIOCBONDSLAVEINFOQUERY: | |
3667 | case SIOCBONDINFOQUERY: | |
881d966b | 3668 | dev_load(net, ifr.ifr_name); |
1da177e4 | 3669 | rtnl_lock(); |
881d966b | 3670 | ret = dev_ifsioc(net, &ifr, cmd); |
1da177e4 LT |
3671 | rtnl_unlock(); |
3672 | return ret; | |
3673 | ||
3674 | case SIOCGIFMEM: | |
3675 | /* Get the per device memory space. We can add this but | |
3676 | * currently do not support it */ | |
3677 | case SIOCSIFMEM: | |
3678 | /* Set the per device memory buffer space. | |
3679 | * Not applicable in our case */ | |
3680 | case SIOCSIFLINK: | |
3681 | return -EINVAL; | |
3682 | ||
3683 | /* | |
3684 | * Unknown or private ioctl. | |
3685 | */ | |
3686 | default: | |
3687 | if (cmd == SIOCWANDEV || | |
3688 | (cmd >= SIOCDEVPRIVATE && | |
3689 | cmd <= SIOCDEVPRIVATE + 15)) { | |
881d966b | 3690 | dev_load(net, ifr.ifr_name); |
1da177e4 | 3691 | rtnl_lock(); |
881d966b | 3692 | ret = dev_ifsioc(net, &ifr, cmd); |
1da177e4 LT |
3693 | rtnl_unlock(); |
3694 | if (!ret && copy_to_user(arg, &ifr, | |
3695 | sizeof(struct ifreq))) | |
3696 | ret = -EFAULT; | |
3697 | return ret; | |
3698 | } | |
1da177e4 | 3699 | /* Take care of Wireless Extensions */ |
295f4a1f | 3700 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) |
881d966b | 3701 | return wext_handle_ioctl(net, &ifr, cmd, arg); |
1da177e4 LT |
3702 | return -EINVAL; |
3703 | } | |
3704 | } | |
3705 | ||
3706 | ||
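/*
 * Usage sketch (illustrative, user-space view, needs <sys/ioctl.h> and
 * <net/if.h>): the dispatch above is what ultimately services an
 * SIOCGIFMTU request issued on any socket.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 *	close(fd);
 */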
3707 | /** | |
3708 | * dev_new_index - allocate an ifindex | |
c4ea43c5 | 3709 | * @net: the applicable net namespace |
1da177e4 LT |
3710 | * |
3711 | * Returns a suitable unique value for a new device interface | |
3712 | * number. The caller must hold the rtnl semaphore or the | |
3713 | * dev_base_lock to be sure it remains unique. | |
3714 | */ | |
881d966b | 3715 | static int dev_new_index(struct net *net) |
1da177e4 LT |
3716 | { |
3717 | static int ifindex; | |
3718 | for (;;) { | |
3719 | if (++ifindex <= 0) | |
3720 | ifindex = 1; | |
881d966b | 3721 | if (!__dev_get_by_index(net, ifindex)) |
1da177e4 LT |
3722 | return ifindex; |
3723 | } | |
3724 | } | |
3725 | ||
1da177e4 LT |
3726 | /* Delayed registration/unregistration */ | |
3727 | static DEFINE_SPINLOCK(net_todo_list_lock); | |
3b5b34fd | 3728 | static LIST_HEAD(net_todo_list); |
1da177e4 | 3729 | |
6f05f629 | 3730 | static void net_set_todo(struct net_device *dev) |
1da177e4 LT |
3731 | { |
3732 | spin_lock(&net_todo_list_lock); | |
3733 | list_add_tail(&dev->todo_list, &net_todo_list); | |
3734 | spin_unlock(&net_todo_list_lock); | |
3735 | } | |
3736 | ||
93ee31f1 DL |
3737 | static void rollback_registered(struct net_device *dev) |
3738 | { | |
3739 | BUG_ON(dev_boot_phase); | |
3740 | ASSERT_RTNL(); | |
3741 | ||
3742 | /* Some devices call this without ever registering, for initialization unwind. */ | |
3743 | if (dev->reg_state == NETREG_UNINITIALIZED) { | |
3744 | printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " | |
3745 | "was registered\n", dev->name, dev); | |
3746 | ||
3747 | WARN_ON(1); | |
3748 | return; | |
3749 | } | |
3750 | ||
3751 | BUG_ON(dev->reg_state != NETREG_REGISTERED); | |
3752 | ||
3753 | /* If device is running, close it first. */ | |
3754 | dev_close(dev); | |
3755 | ||
3756 | /* And unlink it from device chain. */ | |
3757 | unlist_netdevice(dev); | |
3758 | ||
3759 | dev->reg_state = NETREG_UNREGISTERING; | |
3760 | ||
3761 | synchronize_net(); | |
3762 | ||
3763 | /* Shutdown queueing discipline. */ | |
3764 | dev_shutdown(dev); | |
3765 | ||
3766 | ||
3767 | /* Notify protocols, that we are about to destroy | |
3768 | this device. They should clean all the things. | |
3769 | */ | |
3770 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | |
3771 | ||
3772 | /* | |
3773 | * Flush the unicast and multicast chains | |
3774 | */ | |
3775 | dev_addr_discard(dev); | |
3776 | ||
3777 | if (dev->uninit) | |
3778 | dev->uninit(dev); | |
3779 | ||
3780 | /* Notifier chain MUST detach us from master device. */ | |
3781 | BUG_TRAP(!dev->master); | |
3782 | ||
3783 | /* Remove entries from kobject tree */ | |
3784 | netdev_unregister_kobject(dev); | |
3785 | ||
3786 | synchronize_net(); | |
3787 | ||
3788 | dev_put(dev); | |
3789 | } | |
3790 | ||
c773e847 DM |
3791 | static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue, |
3792 | struct net_device *dev) | |
3793 | { | |
3794 | spin_lock_init(&dev_queue->_xmit_lock); | |
3795 | netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type); | |
3796 | dev_queue->xmit_lock_owner = -1; | |
3797 | } | |
3798 | ||
3799 | static void netdev_init_queue_locks(struct net_device *dev) | |
3800 | { | |
3801 | __netdev_init_queue_locks_one(&dev->tx_queue, dev); | |
3802 | __netdev_init_queue_locks_one(&dev->rx_queue, dev); | |
3803 | } | |
3804 | ||
1da177e4 LT |
3805 | /** |
3806 | * register_netdevice - register a network device | |
3807 | * @dev: device to register | |
3808 | * | |
3809 | * Take a completed network device structure and add it to the kernel | |
3810 | * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | |
3811 | * chain. 0 is returned on success. A negative errno code is returned | |
3812 | * on a failure to set up the device, or if the name is a duplicate. | |
3813 | * | |
3814 | * Callers must hold the rtnl semaphore. You may want | |
3815 | * register_netdev() instead of this. | |
3816 | * | |
3817 | * BUGS: | |
3818 | * The locking appears insufficient to guarantee two parallel registers | |
3819 | * will not get the same name. | |
3820 | */ | |
3821 | ||
3822 | int register_netdevice(struct net_device *dev) | |
3823 | { | |
3824 | struct hlist_head *head; | |
3825 | struct hlist_node *p; | |
3826 | int ret; | |
881d966b | 3827 | struct net *net; |
1da177e4 LT |
3828 | |
3829 | BUG_ON(dev_boot_phase); | |
3830 | ASSERT_RTNL(); | |
3831 | ||
b17a7c17 SH |
3832 | might_sleep(); |
3833 | ||
1da177e4 LT |
3834 | /* When net_device's are persistent, this will be fatal. */ |
3835 | BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); | |
c346dca1 YH |
3836 | BUG_ON(!dev_net(dev)); |
3837 | net = dev_net(dev); | |
1da177e4 | 3838 | |
f1f28aa3 | 3839 | spin_lock_init(&dev->addr_list_lock); |
c773e847 | 3840 | netdev_init_queue_locks(dev); |
1da177e4 | 3841 | |
1da177e4 LT |
3842 | dev->iflink = -1; |
3843 | ||
3844 | /* Init, if this function is available */ | |
3845 | if (dev->init) { | |
3846 | ret = dev->init(dev); | |
3847 | if (ret) { | |
3848 | if (ret > 0) | |
3849 | ret = -EIO; | |
90833aa4 | 3850 | goto out; |
1da177e4 LT |
3851 | } |
3852 | } | |
4ec93edb | 3853 | |
1da177e4 LT |
3854 | if (!dev_valid_name(dev->name)) { |
3855 | ret = -EINVAL; | |
7ce1b0ed | 3856 | goto err_uninit; |
1da177e4 LT |
3857 | } |
3858 | ||
881d966b | 3859 | dev->ifindex = dev_new_index(net); |
1da177e4 LT |
3860 | if (dev->iflink == -1) |
3861 | dev->iflink = dev->ifindex; | |
3862 | ||
3863 | /* Check for existence of name */ | |
881d966b | 3864 | head = dev_name_hash(net, dev->name); |
1da177e4 LT |
3865 | hlist_for_each(p, head) { |
3866 | struct net_device *d | |
3867 | = hlist_entry(p, struct net_device, name_hlist); | |
3868 | if (!strncmp(d->name, dev->name, IFNAMSIZ)) { | |
3869 | ret = -EEXIST; | |
7ce1b0ed | 3870 | goto err_uninit; |
1da177e4 | 3871 | } |
4ec93edb | 3872 | } |
1da177e4 | 3873 | |
d212f87b SH |
3874 | /* Fix illegal checksum combinations */ |
3875 | if ((dev->features & NETIF_F_HW_CSUM) && | |
3876 | (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | |
3877 | printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n", | |
3878 | dev->name); | |
3879 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); | |
3880 | } | |
3881 | ||
3882 | if ((dev->features & NETIF_F_NO_CSUM) && | |
3883 | (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | |
3884 | printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n", | |
3885 | dev->name); | |
3886 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | |
3887 | } | |
3888 | ||
3889 | ||
1da177e4 LT |
3890 | /* Fix illegal SG+CSUM combinations. */ |
3891 | if ((dev->features & NETIF_F_SG) && | |
8648b305 | 3892 | !(dev->features & NETIF_F_ALL_CSUM)) { |
5a8da02b | 3893 | printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", |
1da177e4 LT |
3894 | dev->name); |
3895 | dev->features &= ~NETIF_F_SG; | |
3896 | } | |
3897 | ||
3898 | /* TSO requires that SG is present as well. */ | |
3899 | if ((dev->features & NETIF_F_TSO) && | |
3900 | !(dev->features & NETIF_F_SG)) { | |
5a8da02b | 3901 | printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", |
1da177e4 LT |
3902 | dev->name); |
3903 | dev->features &= ~NETIF_F_TSO; | |
3904 | } | |
e89e9cf5 AR |
3905 | if (dev->features & NETIF_F_UFO) { |
3906 | if (!(dev->features & NETIF_F_HW_CSUM)) { | |
3907 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | |
3908 | "NETIF_F_HW_CSUM feature.\n", | |
3909 | dev->name); | |
3910 | dev->features &= ~NETIF_F_UFO; | |
3911 | } | |
3912 | if (!(dev->features & NETIF_F_SG)) { | |
3913 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | |
3914 | "NETIF_F_SG feature.\n", | |
3915 | dev->name); | |
3916 | dev->features &= ~NETIF_F_UFO; | |
3917 | } | |
3918 | } | |
1da177e4 | 3919 | |
aaf8cdc3 | 3920 | netdev_initialize_kobject(dev); |
8b41d188 | 3921 | ret = netdev_register_kobject(dev); |
b17a7c17 | 3922 | if (ret) |
7ce1b0ed | 3923 | goto err_uninit; |
b17a7c17 SH |
3924 | dev->reg_state = NETREG_REGISTERED; |
3925 | ||
1da177e4 LT |
3926 | /* |
3927 | * Default initial state at registry is that the | |
3928 | * device is present. | |
3929 | */ | |
3930 | ||
3931 | set_bit(__LINK_STATE_PRESENT, &dev->state); | |
3932 | ||
1da177e4 | 3933 | dev_init_scheduler(dev); |
1da177e4 | 3934 | dev_hold(dev); |
ce286d32 | 3935 | list_netdevice(dev); |
1da177e4 LT |
3936 | |
3937 | /* Notify protocols, that a new device appeared. */ | |
056925ab | 3938 | ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); |
fcc5a03a | 3939 | ret = notifier_to_errno(ret); |
93ee31f1 DL |
3940 | if (ret) { |
3941 | rollback_registered(dev); | |
3942 | dev->reg_state = NETREG_UNREGISTERED; | |
3943 | } | |
1da177e4 LT |
3944 | |
3945 | out: | |
3946 | return ret; | |
7ce1b0ed HX |
3947 | |
3948 | err_uninit: | |
3949 | if (dev->uninit) | |
3950 | dev->uninit(dev); | |
3951 | goto out; | |
1da177e4 LT |
3952 | } |
3953 | ||
3954 | /** | |
3955 | * register_netdev - register a network device | |
3956 | * @dev: device to register | |
3957 | * | |
3958 | * Take a completed network device structure and add it to the kernel | |
3959 | * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | |
3960 | * chain. 0 is returned on success. A negative errno code is returned | |
3961 | * on a failure to set up the device, or if the name is a duplicate. | |
3962 | * | |
38b4da38 | 3963 | * This is a wrapper around register_netdevice that takes the rtnl semaphore |
1da177e4 LT |
3964 | * and expands the device name if you passed a format string to |
3965 | * alloc_netdev. | |
3966 | */ | |
3967 | int register_netdev(struct net_device *dev) | |
3968 | { | |
3969 | int err; | |
3970 | ||
3971 | rtnl_lock(); | |
3972 | ||
3973 | /* | |
3974 | * If the name is a format string the caller wants us to do a | |
3975 | * name allocation. | |
3976 | */ | |
3977 | if (strchr(dev->name, '%')) { | |
3978 | err = dev_alloc_name(dev, dev->name); | |
3979 | if (err < 0) | |
3980 | goto out; | |
3981 | } | |
4ec93edb | 3982 | |
1da177e4 LT |
3983 | err = register_netdevice(dev); |
3984 | out: | |
3985 | rtnl_unlock(); | |
3986 | return err; | |
3987 | } | |
3988 | EXPORT_SYMBOL(register_netdev); | |
3989 | ||
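/*
 * Usage sketch (illustrative, for a hypothetical driver "foo"): the
 * usual lifecycle pairs alloc_netdev()/register_netdev() on probe with
 * unregister_netdev()/free_netdev() on remove; "%d" in the name is
 * expanded here to foo0, foo1, ...
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */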
3990 | /* | |
3991 | * netdev_wait_allrefs - wait until all references are gone. | |
3992 | * | |
3993 | * This is called when unregistering network devices. | |
3994 | * | |
3995 | * Any protocol or device that holds a reference should register | |
3996 | * for netdevice notification, and cleanup and put back the | |
3997 | * reference if they receive an UNREGISTER event. | |
3998 | * We can get stuck here if buggy protocols don't correctly | |
4ec93edb | 3999 | * call dev_put. |
1da177e4 LT |
4000 | */ |
4001 | static void netdev_wait_allrefs(struct net_device *dev) | |
4002 | { | |
4003 | unsigned long rebroadcast_time, warning_time; | |
4004 | ||
4005 | rebroadcast_time = warning_time = jiffies; | |
4006 | while (atomic_read(&dev->refcnt) != 0) { | |
4007 | if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { | |
6756ae4b | 4008 | rtnl_lock(); |
1da177e4 LT |
4009 | |
4010 | /* Rebroadcast unregister notification */ | |
056925ab | 4011 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); |
1da177e4 LT |
4012 | |
4013 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, | |
4014 | &dev->state)) { | |
4015 | /* We must not have linkwatch events | |
4016 | * pending on unregister. If this | |
4017 | * happens, we simply run the queue | |
4018 | * unscheduled, resulting in a noop | |
4019 | * for this device. | |
4020 | */ | |
4021 | linkwatch_run_queue(); | |
4022 | } | |
4023 | ||
6756ae4b | 4024 | __rtnl_unlock(); |
1da177e4 LT |
4025 | |
4026 | rebroadcast_time = jiffies; | |
4027 | } | |
4028 | ||
4029 | msleep(250); | |
4030 | ||
4031 | if (time_after(jiffies, warning_time + 10 * HZ)) { | |
4032 | printk(KERN_EMERG "unregister_netdevice: " | |
4033 | "waiting for %s to become free. Usage " | |
4034 | "count = %d\n", | |
4035 | dev->name, atomic_read(&dev->refcnt)); | |
4036 | warning_time = jiffies; | |
4037 | } | |
4038 | } | |
4039 | } | |
4040 | ||
4041 | /* The sequence is: | |
4042 | * | |
4043 | * rtnl_lock(); | |
4044 | * ... | |
4045 | * register_netdevice(x1); | |
4046 | * register_netdevice(x2); | |
4047 | * ... | |
4048 | * unregister_netdevice(y1); | |
4049 | * unregister_netdevice(y2); | |
4050 | * ... | |
4051 | * rtnl_unlock(); | |
4052 | * free_netdev(y1); | |
4053 | * free_netdev(y2); | |
4054 | * | |
4055 | * We are invoked by rtnl_unlock() after it drops the semaphore. | |
4056 | * This allows us to deal with problems: | |
b17a7c17 | 4057 | * 1) We can delete sysfs objects which invoke hotplug |
1da177e4 LT |
4058 | * without deadlocking with linkwatch via keventd. |
4059 | * 2) Since we run with the RTNL semaphore not held, we can sleep | |
4060 | * safely in order to wait for the netdev refcnt to drop to zero. | |
4061 | */ | |
4a3e2f71 | 4062 | static DEFINE_MUTEX(net_todo_run_mutex); |
1da177e4 LT |
4063 | void netdev_run_todo(void) |
4064 | { | |
626ab0e6 | 4065 | struct list_head list; |
1da177e4 LT |
4066 | |
4067 | /* Need to guard against multiple CPUs getting out of order. */ | |
4a3e2f71 | 4068 | mutex_lock(&net_todo_run_mutex); |
1da177e4 LT |
4069 | |
4070 | /* Not safe to do outside the semaphore. We must not return | |
4071 | * until all unregister events invoked by the local processor | |
4072 | * have been completed (either by this todo run, or one on | |
4073 | * another cpu). | |
4074 | */ | |
4075 | if (list_empty(&net_todo_list)) | |
4076 | goto out; | |
4077 | ||
4078 | /* Snapshot list, allow later requests */ | |
4079 | spin_lock(&net_todo_list_lock); | |
626ab0e6 | 4080 | list_replace_init(&net_todo_list, &list); |
1da177e4 | 4081 | spin_unlock(&net_todo_list_lock); |
626ab0e6 | 4082 | |
1da177e4 LT |
4083 | while (!list_empty(&list)) { |
4084 | struct net_device *dev | |
4085 | = list_entry(list.next, struct net_device, todo_list); | |
4086 | list_del(&dev->todo_list); | |
4087 | ||
b17a7c17 SH |
4088 | if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { |
4089 | printk(KERN_ERR "network todo '%s' but state %d\n", | |
4090 | dev->name, dev->reg_state); | |
4091 | dump_stack(); | |
4092 | continue; | |
4093 | } | |
1da177e4 | 4094 | |
b17a7c17 | 4095 | dev->reg_state = NETREG_UNREGISTERED; |
1da177e4 | 4096 | |
b17a7c17 | 4097 | netdev_wait_allrefs(dev); |
1da177e4 | 4098 | |
b17a7c17 SH |
4099 | /* paranoia */ |
4100 | BUG_ON(atomic_read(&dev->refcnt)); | |
4101 | BUG_TRAP(!dev->ip_ptr); | |
4102 | BUG_TRAP(!dev->ip6_ptr); | |
4103 | BUG_TRAP(!dev->dn_ptr); | |
1da177e4 | 4104 | |
b17a7c17 SH |
4105 | if (dev->destructor) |
4106 | dev->destructor(dev); | |
9093bbb2 SH |
4107 | |
4108 | /* Free network device */ | |
4109 | kobject_put(&dev->dev.kobj); | |
1da177e4 LT |
4110 | } |
4111 | ||
4112 | out: | |
4a3e2f71 | 4113 | mutex_unlock(&net_todo_run_mutex); |
1da177e4 LT |
4114 | } |
4115 | ||
5a1b5898 | 4116 | static struct net_device_stats *internal_stats(struct net_device *dev) |
c45d286e | 4117 | { |
5a1b5898 | 4118 | return &dev->stats; |
c45d286e RR |
4119 | } |
4120 | ||
dc2b4847 DM |
4121 | static void netdev_init_one_queue(struct net_device *dev, |
4122 | struct netdev_queue *queue) | |
4123 | { | |
4124 | spin_lock_init(&queue->lock); | |
4125 | queue->dev = dev; | |
4126 | } | |
4127 | ||
bb949fbd DM |
4128 | static void netdev_init_queues(struct net_device *dev) |
4129 | { | |
dc2b4847 DM |
4130 | netdev_init_one_queue(dev, &dev->rx_queue); |
4131 | netdev_init_one_queue(dev, &dev->tx_queue); | |
bb949fbd DM |
4132 | } |
4133 | ||
1da177e4 | 4134 | /** |
f25f4e44 | 4135 | * alloc_netdev_mq - allocate network device |
1da177e4 LT |
4136 | * @sizeof_priv: size of private data to allocate space for |
4137 | * @name: device name format string | |
4138 | * @setup: callback to initialize device | |
f25f4e44 | 4139 | * @queue_count: the number of subqueues to allocate |
1da177e4 LT |
4140 | * |
4141 | * Allocates a struct net_device with private data area for driver use | |
f25f4e44 PWJ |
4142 | * and performs basic initialization. Also allocates subqueue structs | |
4143 | * for each queue on the device at the end of the netdevice. | |
1da177e4 | 4144 | */ |
f25f4e44 PWJ |
4145 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
4146 | void (*setup)(struct net_device *), unsigned int queue_count) | |
1da177e4 LT |
4147 | { |
4148 | void *p; | |
4149 | struct net_device *dev; | |
4150 | int alloc_size; | |
4151 | ||
b6fe17d6 SH |
4152 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
4153 | ||
d1643d24 AD |
4154 | alloc_size = sizeof(struct net_device) + |
4155 | sizeof(struct net_device_subqueue) * (queue_count - 1); | |
4156 | if (sizeof_priv) { | |
4157 | /* ensure 32-byte alignment of private area */ | |
4158 | alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; | |
4159 | alloc_size += sizeof_priv; | |
4160 | } | |
4161 | /* ensure 32-byte alignment of whole construct */ | |
4162 | alloc_size += NETDEV_ALIGN_CONST; | |
1da177e4 | 4163 | |
31380de9 | 4164 | p = kzalloc(alloc_size, GFP_KERNEL); |
1da177e4 | 4165 | if (!p) { |
b6fe17d6 | 4166 | printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); |
1da177e4 LT |
4167 | return NULL; |
4168 | } | |
1da177e4 LT |
4169 | |
4170 | dev = (struct net_device *) | |
4171 | (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); | |
4172 | dev->padded = (char *)dev - (char *)p; | |
c346dca1 | 4173 | dev_net_set(dev, &init_net); |
1da177e4 | 4174 | |
f25f4e44 PWJ |
4175 | if (sizeof_priv) { |
4176 | dev->priv = ((char *)dev + | |
4177 | ((sizeof(struct net_device) + | |
4178 | (sizeof(struct net_device_subqueue) * | |
31ce72a6 | 4179 | (queue_count - 1)) + NETDEV_ALIGN_CONST) |
f25f4e44 PWJ |
4180 | & ~NETDEV_ALIGN_CONST)); |
4181 | } | |
4182 | ||
4183 | dev->egress_subqueue_count = queue_count; | |
82cc1a7a | 4184 | dev->gso_max_size = GSO_MAX_SIZE; |
1da177e4 | 4185 | |
bb949fbd DM |
4186 | netdev_init_queues(dev); |
4187 | ||
5a1b5898 | 4188 | dev->get_stats = internal_stats; |
bea3348e | 4189 | netpoll_netdev_init(dev); |
1da177e4 LT |
4190 | setup(dev); |
4191 | strcpy(dev->name, name); | |
4192 | return dev; | |
4193 | } | |
f25f4e44 | 4194 | EXPORT_SYMBOL(alloc_netdev_mq); |
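/*
 * Usage sketch (illustrative): sizeof_priv reserves a 32-byte aligned
 * private area behind the subqueue array, reachable via netdev_priv().
 * "struct foo_priv" and its "magic" field are hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *			      ether_setup, 4);	(4 TX subqueues)
 *	if (dev) {
 *		struct foo_priv *priv = netdev_priv(dev);
 *		priv->magic = 0xf00;
 *	}
 */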
1da177e4 LT |
4195 | |
4196 | /** | |
4197 | * free_netdev - free network device | |
4198 | * @dev: device | |
4199 | * | |
4ec93edb YH |
4200 | * This function does the last stage of destroying an allocated device |
4201 | * interface. The reference to the device object is released. | |
1da177e4 LT |
4202 | * If this is the last reference then it will be freed. |
4203 | */ | |
4204 | void free_netdev(struct net_device *dev) | |
4205 | { | |
f3005d7f DL |
4206 | release_net(dev_net(dev)); |
4207 | ||
3041a069 | 4208 | /* Compatibility with error handling in drivers */ |
1da177e4 LT |
4209 | if (dev->reg_state == NETREG_UNINITIALIZED) { |
4210 | kfree((char *)dev - dev->padded); | |
4211 | return; | |
4212 | } | |
4213 | ||
4214 | BUG_ON(dev->reg_state != NETREG_UNREGISTERED); | |
4215 | dev->reg_state = NETREG_RELEASED; | |
4216 | ||
43cb76d9 GKH |
4217 | /* will free via device release */ |
4218 | put_device(&dev->dev); | |
1da177e4 | 4219 | } |
4ec93edb | 4220 | |
1da177e4 | 4221 | /* Synchronize with packet receive processing. */ |
4ec93edb | 4222 | void synchronize_net(void) |
1da177e4 LT |
4223 | { |
4224 | might_sleep(); | |
fbd568a3 | 4225 | synchronize_rcu(); |
1da177e4 LT |
4226 | } |
4227 | ||
4228 | /** | |
4229 | * unregister_netdevice - remove device from the kernel | |
4230 | * @dev: device | |
4231 | * | |
4232 | * This function shuts down a device interface and removes it | |
d59b54b1 | 4233 | * from the kernel tables. |
1da177e4 LT |
4234 | * |
4235 | * Callers must hold the rtnl semaphore. You may want | |
4236 | * unregister_netdev() instead of this. | |
4237 | */ | |
4238 | ||
22f8cde5 | 4239 | void unregister_netdevice(struct net_device *dev) |
1da177e4 | 4240 | { |
a6620712 HX |
4241 | ASSERT_RTNL(); |
4242 | ||
93ee31f1 | 4243 | rollback_registered(dev); |
1da177e4 LT |
4244 | /* Finish processing unregister after unlock */ |
4245 | net_set_todo(dev); | |
1da177e4 LT |
4246 | } |
4247 | ||
4248 | /** | |
4249 | * unregister_netdev - remove device from the kernel | |
4250 | * @dev: device | |
4251 | * | |
4252 | * This function shuts down a device interface and removes it | |
d59b54b1 | 4253 | * from the kernel tables. |
1da177e4 LT |
4254 | * |
4255 | * This is just a wrapper for unregister_netdevice that takes | |
4256 | * the rtnl semaphore. In general you want to use this and not | |
4257 | * unregister_netdevice. | |
4258 | */ | |
4259 | void unregister_netdev(struct net_device *dev) | |
4260 | { | |
4261 | rtnl_lock(); | |
4262 | unregister_netdevice(dev); | |
4263 | rtnl_unlock(); | |
4264 | } | |
4265 | ||
4266 | EXPORT_SYMBOL(unregister_netdev); | |
4267 | ||
ce286d32 EB |
4268 | /** |
4269 | * dev_change_net_namespace - move device to different network namespace | |
4270 | * @dev: device | |
4271 | * @net: network namespace | |
4272 | * @pat: If not NULL, name pattern to try if the current device name | |
4273 | * is already taken in the destination network namespace. | |
4274 | * | |
4275 | * This function shuts down a device interface and moves it | |
4276 | * to a new network namespace. On success 0 is returned, on | |
4277 | * a failure a negative errno code is returned. | |
4278 | * | |
4279 | * Callers must hold the rtnl semaphore. | |
4280 | */ | |
4281 | ||
4282 | int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) | |
4283 | { | |
4284 | char buf[IFNAMSIZ]; | |
4285 | const char *destname; | |
4286 | int err; | |
4287 | ||
4288 | ASSERT_RTNL(); | |
4289 | ||
4290 | /* Don't allow namespace local devices to be moved. */ | |
4291 | err = -EINVAL; | |
4292 | if (dev->features & NETIF_F_NETNS_LOCAL) | |
4293 | goto out; | |
4294 | ||
4295 | /* Ensure the device has been registered */ | |
4296 | err = -EINVAL; | |
4297 | if (dev->reg_state != NETREG_REGISTERED) | |
4298 | goto out; | |
4299 | ||
4300 | /* Get out if there is nothing to do */ | |
4301 | err = 0; | |
878628fb | 4302 | if (net_eq(dev_net(dev), net)) |
ce286d32 EB |
4303 | goto out; |
4304 | ||
4305 | /* Pick the destination device name, and ensure | |
4306 | * we can use it in the destination network namespace. | |
4307 | */ | |
4308 | err = -EEXIST; | |
4309 | destname = dev->name; | |
4310 | if (__dev_get_by_name(net, destname)) { | |
4311 | /* We get here if we can't use the current device name */ | |
4312 | if (!pat) | |
4313 | goto out; | |
4314 | if (!dev_valid_name(pat)) | |
4315 | goto out; | |
4316 | if (strchr(pat, '%')) { | |
4317 | if (__dev_alloc_name(net, pat, buf) < 0) | |
4318 | goto out; | |
4319 | destname = buf; | |
4320 | } else | |
4321 | destname = pat; | |
4322 | if (__dev_get_by_name(net, destname)) | |
4323 | goto out; | |
4324 | } | |
4325 | ||
4326 | /* | |
4327 | * And now a mini version of register_netdevice and unregister_netdevice. | |
4328 | */ | |
4329 | ||
4330 | /* If device is running, close it first. */ | |
9b772652 | 4331 | dev_close(dev); |
ce286d32 EB |
4332 | |
4333 | /* And unlink it from device chain */ | |
4334 | err = -ENODEV; | |
4335 | unlist_netdevice(dev); | |
4336 | ||
4337 | synchronize_net(); | |
4338 | ||
4339 | /* Shutdown queueing discipline. */ | |
4340 | dev_shutdown(dev); | |
4341 | ||
4342 | /* Notify protocols, that we are about to destroy | |
4343 | this device. They should clean all the things. | |
4344 | */ | |
4345 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | |
4346 | ||
4347 | /* | |
4348 | * Flush the unicast and multicast chains | |
4349 | */ | |
4350 | dev_addr_discard(dev); | |
4351 | ||
4352 | /* Actually switch the network namespace */ | |
c346dca1 | 4353 | dev_net_set(dev, net); |
ce286d32 EB |
4354 | |
4355 | /* Assign the new device name */ | |
4356 | if (destname != dev->name) | |
4357 | strcpy(dev->name, destname); | |
4358 | ||
4359 | /* If there is an ifindex conflict assign a new one */ | |
4360 | if (__dev_get_by_index(net, dev->ifindex)) { | |
4361 | int iflink = (dev->iflink == dev->ifindex); | |
4362 | dev->ifindex = dev_new_index(net); | |
4363 | if (iflink) | |
4364 | dev->iflink = dev->ifindex; | |
4365 | } | |
4366 | ||
8b41d188 | 4367 | /* Fixup kobjects */ |
aaf8cdc3 DL |
4368 | netdev_unregister_kobject(dev); |
4369 | err = netdev_register_kobject(dev); | |
8b41d188 | 4370 | WARN_ON(err); |
ce286d32 EB |
4371 | |
4372 | /* Add the device back in the hashes */ | |
4373 | list_netdevice(dev); | |
4374 | ||
4375 | /* Notify protocols, that a new device appeared. */ | |
4376 | call_netdevice_notifiers(NETDEV_REGISTER, dev); | |
4377 | ||
4378 | synchronize_net(); | |
4379 | err = 0; | |
4380 | out: | |
4381 | return err; | |
4382 | } | |
4383 | ||
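/*
 * Usage sketch (illustrative): moving a device into another namespace
 * under the RTNL, with an "eth%d"-style fallback pattern in case the
 * current name is already taken there; default_device_exit() below
 * relies on the same call when a namespace is torn down.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */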
1da177e4 LT |
4384 | static int dev_cpu_callback(struct notifier_block *nfb, |
4385 | unsigned long action, | |
4386 | void *ocpu) | |
4387 | { | |
4388 | struct sk_buff **list_skb; | |
ee609cb3 | 4389 | struct netdev_queue **list_net; |
1da177e4 LT |
4390 | struct sk_buff *skb; |
4391 | unsigned int cpu, oldcpu = (unsigned long)ocpu; | |
4392 | struct softnet_data *sd, *oldsd; | |
4393 | ||
8bb78442 | 4394 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) |
1da177e4 LT |
4395 | return NOTIFY_OK; |
4396 | ||
4397 | local_irq_disable(); | |
4398 | cpu = smp_processor_id(); | |
4399 | sd = &per_cpu(softnet_data, cpu); | |
4400 | oldsd = &per_cpu(softnet_data, oldcpu); | |
4401 | ||
4402 | /* Find end of our completion_queue. */ | |
4403 | list_skb = &sd->completion_queue; | |
4404 | while (*list_skb) | |
4405 | list_skb = &(*list_skb)->next; | |
4406 | /* Append completion queue from offline CPU. */ | |
4407 | *list_skb = oldsd->completion_queue; | |
4408 | oldsd->completion_queue = NULL; | |
4409 | ||
4410 | /* Find end of our output_queue. */ | |
4411 | list_net = &sd->output_queue; | |
4412 | while (*list_net) | |
4413 | list_net = &(*list_net)->next_sched; | |
4414 | /* Append output queue from offline CPU. */ | |
4415 | *list_net = oldsd->output_queue; | |
4416 | oldsd->output_queue = NULL; | |
4417 | ||
4418 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | |
4419 | local_irq_enable(); | |
4420 | ||
4421 | /* Process offline CPU's input_pkt_queue */ | |
4422 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) | |
4423 | netif_rx(skb); | |
4424 | ||
4425 | return NOTIFY_OK; | |
4426 | } | |
1da177e4 | 4427 | |
db217334 CL |
4428 | #ifdef CONFIG_NET_DMA |
4429 | /** | |
0ed72ec4 RD |
4430 | * net_dma_rebalance - try to maintain one DMA channel per CPU |
4431 | * @net_dma: DMA client and associated data (lock, channels, channel_mask) | |
4432 | * | |
4433 | * This is called when the number of channels allocated to the net_dma client | |
4434 | * changes. The net_dma client tries to have one DMA channel per CPU. | |
db217334 | 4435 | */ |
d379b01e DW |
4436 | |
4437 | static void net_dma_rebalance(struct net_dma *net_dma) | |
db217334 | 4438 | { |
d379b01e | 4439 | unsigned int cpu, i, n, chan_idx; |
db217334 CL |
4440 | struct dma_chan *chan; |
4441 | ||
d379b01e | 4442 | if (cpus_empty(net_dma->channel_mask)) { |
db217334 | 4443 | for_each_online_cpu(cpu) |
29bbd72d | 4444 | rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); |
db217334 CL |
4445 | return; |
4446 | } | |
4447 | ||
4448 | i = 0; | |
4449 | cpu = first_cpu(cpu_online_map); | |
4450 | ||
d379b01e DW |
4451 | for_each_cpu_mask(chan_idx, net_dma->channel_mask) { |
4452 | chan = net_dma->channels[chan_idx]; | |
4453 | ||
4454 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | |
4455 | + (i < (num_online_cpus() % | |
4456 | cpus_weight(net_dma->channel_mask)) ? 1 : 0)); | |
db217334 CL |
4457 | |
4458 | while (n) { | |
29bbd72d | 4459 | per_cpu(softnet_data, cpu).net_dma = chan; |
db217334 CL |
4460 | cpu = next_cpu(cpu, cpu_online_map); |
4461 | n--; | |
4462 | } | |
4463 | i++; | |
4464 | } | |
db217334 CL |
4465 | } |
4466 | ||
4467 | /** | |
4468 | * netdev_dma_event - event callback for the net_dma_client | |
4469 | * @client: should always be net_dma_client | |
f4b8ea78 | 4470 | * @chan: DMA channel for the event |
0ed72ec4 | 4471 | * @state: DMA state to be handled |
db217334 | 4472 | */ |
d379b01e DW |
4473 | static enum dma_state_client |
4474 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | |
4475 | enum dma_state state) | |
4476 | { | |
4477 | int i, found = 0, pos = -1; | |
4478 | struct net_dma *net_dma = | |
4479 | container_of(client, struct net_dma, client); | |
4480 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | |
4481 | ||
4482 | spin_lock(&net_dma->lock); | |
4483 | switch (state) { | |
4484 | case DMA_RESOURCE_AVAILABLE: | |
0c0b0aca | 4485 | for (i = 0; i < nr_cpu_ids; i++) |
d379b01e DW |
4486 | if (net_dma->channels[i] == chan) { |
4487 | found = 1; | |
4488 | break; | |
4489 | } else if (net_dma->channels[i] == NULL && pos < 0) | |
4490 | pos = i; | |
4491 | ||
4492 | if (!found && pos >= 0) { | |
4493 | ack = DMA_ACK; | |
4494 | net_dma->channels[pos] = chan; | |
4495 | cpu_set(pos, net_dma->channel_mask); | |
4496 | net_dma_rebalance(net_dma); | |
4497 | } | |
db217334 CL |
4498 | break; |
4499 | case DMA_RESOURCE_REMOVED: | |
0c0b0aca | 4500 | for (i = 0; i < nr_cpu_ids; i++) |
d379b01e DW |
4501 | if (net_dma->channels[i] == chan) { |
4502 | found = 1; | |
4503 | pos = i; | |
4504 | break; | |
4505 | } | |
4506 | ||
4507 | if (found) { | |
4508 | ack = DMA_ACK; | |
4509 | cpu_clear(pos, net_dma->channel_mask); | |
4510 | net_dma->channels[i] = NULL; | |
4511 | net_dma_rebalance(net_dma); | |
4512 | } | |
db217334 CL |
4513 | break; |
4514 | default: | |
4515 | break; | |
4516 | } | |
d379b01e DW |
4517 | spin_unlock(&net_dma->lock); |
4518 | ||
4519 | return ack; | |
db217334 CL |
4520 | } |
4521 | ||
4522 | /** | |
4523 | * netdev_dma_register - register the networking subsystem as a DMA client | |
4524 | */ | |
4525 | static int __init netdev_dma_register(void) | |
4526 | { | |
0c0b0aca MT |
4527 | net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *), | |
4528 | GFP_KERNEL); | |
4529 | if (unlikely(!net_dma.channels)) { | |
4530 | printk(KERN_NOTICE | |
4531 | "netdev_dma: no memory for net_dma.channels\n"); | |
4532 | return -ENOMEM; | |
4533 | } | |
d379b01e DW |
4534 | spin_lock_init(&net_dma.lock); |
4535 | dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); | |
4536 | dma_async_client_register(&net_dma.client); | |
4537 | dma_async_client_chan_request(&net_dma.client); | |
db217334 CL |
4538 | return 0; |
4539 | } | |
4540 | ||
4541 | #else | |
4542 | static int __init netdev_dma_register(void) { return -ENODEV; } | |
4543 | #endif /* CONFIG_NET_DMA */ | |
1da177e4 | 4544 | |
7f353bf2 HX |
4545 | /** |
4546 | * netdev_compute_features - compute conjunction of two feature sets | |
4547 | * @all: first feature set | |
4548 | * @one: second feature set | |
4549 | * | |
4550 | * Computes a new feature set after adding a device with feature set | |
4551 | * @one to the master device with current feature set @all. Returns | |
4552 | * the new feature set. | |
4553 | */ | |
4554 | int netdev_compute_features(unsigned long all, unsigned long one) | |
4555 | { | |
4556 | /* if device needs checksumming, downgrade to hw checksumming */ | |
4557 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) | |
4558 | all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; | |
4559 | ||
4560 | /* if device can't do all checksum, downgrade to ipv4/ipv6 */ | |
4561 | if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM)) | |
4562 | all ^= NETIF_F_HW_CSUM | |
4563 | | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | |
4564 | ||
4565 | if (one & NETIF_F_GSO) | |
4566 | one |= NETIF_F_GSO_SOFTWARE; | |
4567 | one |= NETIF_F_GSO; | |
4568 | ||
4569 | /* If even one device supports robust GSO, enable it for all. */ | |
4570 | if (one & NETIF_F_GSO_ROBUST) | |
4571 | all |= NETIF_F_GSO_ROBUST; | |
4572 | ||
4573 | all &= one | NETIF_F_LLTX; | |
4574 | ||
4575 | if (!(all & NETIF_F_ALL_CSUM)) | |
4576 | all &= ~NETIF_F_SG; | |
4577 | if (!(all & NETIF_F_SG)) | |
4578 | all &= ~NETIF_F_GSO_MASK; | |
4579 | ||
4580 | return all; | |
4581 | } | |
4582 | EXPORT_SYMBOL(netdev_compute_features); | |
4583 | ||
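/*
 * Worked example (illustrative): a bonding-style master folding in two
 * identical slaves. Starting from all = NETIF_F_NO_CSUM | NETIF_F_SG,
 * a slave offering only NETIF_F_IP_CSUM first downgrades NO_CSUM to
 * HW_CSUM, then HW_CSUM to IP_CSUM/IPV6_CSUM; the final intersection
 * with "one | NETIF_F_LLTX" leaves NETIF_F_IP_CSUM | NETIF_F_SG.
 *
 *	unsigned long all = NETIF_F_NO_CSUM | NETIF_F_SG;
 *
 *	all = netdev_compute_features(all, NETIF_F_IP_CSUM | NETIF_F_SG);
 *	all = netdev_compute_features(all, NETIF_F_IP_CSUM | NETIF_F_SG);
 */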
30d97d35 PE |
4584 | static struct hlist_head *netdev_create_hash(void) |
4585 | { | |
4586 | int i; | |
4587 | struct hlist_head *hash; | |
4588 | ||
4589 | hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); | |
4590 | if (hash != NULL) | |
4591 | for (i = 0; i < NETDEV_HASHENTRIES; i++) | |
4592 | INIT_HLIST_HEAD(&hash[i]); | |
4593 | ||
4594 | return hash; | |
4595 | } | |
4596 | ||
881d966b | 4597 | /* Initialize per network namespace state */ |
4665079c | 4598 | static int __net_init netdev_init(struct net *net) |
881d966b | 4599 | { |
881d966b | 4600 | INIT_LIST_HEAD(&net->dev_base_head); |
881d966b | 4601 | |
30d97d35 PE |
4602 | net->dev_name_head = netdev_create_hash(); |
4603 | if (net->dev_name_head == NULL) | |
4604 | goto err_name; | |
881d966b | 4605 | |
30d97d35 PE |
4606 | net->dev_index_head = netdev_create_hash(); |
4607 | if (net->dev_index_head == NULL) | |
4608 | goto err_idx; | |
881d966b EB |
4609 | |
4610 | return 0; | |
30d97d35 PE |
4611 | |
4612 | err_idx: | |
4613 | kfree(net->dev_name_head); | |
4614 | err_name: | |
4615 | return -ENOMEM; | |
881d966b EB |
4616 | } |
4617 | ||
4665079c | 4618 | static void __net_exit netdev_exit(struct net *net) |
881d966b EB |
4619 | { |
4620 | kfree(net->dev_name_head); | |
4621 | kfree(net->dev_index_head); | |
4622 | } | |
4623 | ||
022cbae6 | 4624 | static struct pernet_operations __net_initdata netdev_net_ops = { |
881d966b EB |
4625 | .init = netdev_init, |
4626 | .exit = netdev_exit, | |
4627 | }; | |
4628 | ||
4665079c | 4629 | static void __net_exit default_device_exit(struct net *net) |
ce286d32 EB |
4630 | { |
4631 | struct net_device *dev, *next; | |
4632 | /* | |
4633 | * Push all migratable network devices back to the | |
4634 | * initial network namespace | |
4635 | */ | |
4636 | rtnl_lock(); | |
4637 | for_each_netdev_safe(net, dev, next) { | |
4638 | int err; | |
aca51397 | 4639 | char fb_name[IFNAMSIZ]; |
ce286d32 EB |
4640 | |
4641 | /* Ignore unmovable devices (e.g. loopback) */ | |
4642 | if (dev->features & NETIF_F_NETNS_LOCAL) | |
4643 | continue; | |
4644 | ||
4645 | /* Push remaining network devices to init_net */ | |
aca51397 PE |
4646 | snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); |
4647 | err = dev_change_net_namespace(dev, &init_net, fb_name); | |
ce286d32 | 4648 | if (err) { |
aca51397 | 4649 | printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", |
ce286d32 | 4650 | __func__, dev->name, err); |
aca51397 | 4651 | BUG(); |
ce286d32 EB |
4652 | } |
4653 | } | |
4654 | rtnl_unlock(); | |
4655 | } | |
4656 | ||
022cbae6 | 4657 | static struct pernet_operations __net_initdata default_device_ops = { |
ce286d32 EB |
4658 | .exit = default_device_exit, |
4659 | }; | |
4660 | ||
1da177e4 LT |
4661 | /* |
4662 | * Initialize the DEV module. At boot time this walks the device list and | |
4663 | * unhooks any devices that fail to initialise (normally hardware not | |
4664 | * present) and leaves us with a valid list of present and active devices. | |
4665 | * | |
4666 | */ | |
4667 | ||
4668 | /* | |
4669 | * This is called single threaded during boot, so no need | |
4670 | * to take the rtnl semaphore. | |
4671 | */ | |
4672 | static int __init net_dev_init(void) | |
4673 | { | |
4674 | int i, rc = -ENOMEM; | |
4675 | ||
4676 | BUG_ON(!dev_boot_phase); | |
4677 | ||
1da177e4 LT |
4678 | if (dev_proc_init()) |
4679 | goto out; | |
4680 | ||
8b41d188 | 4681 | if (netdev_kobject_init()) |
1da177e4 LT |
4682 | goto out; |
4683 | ||
4684 | INIT_LIST_HEAD(&ptype_all); | |
82d8a867 | 4685 | for (i = 0; i < PTYPE_HASH_SIZE; i++) |
1da177e4 LT |
4686 | INIT_LIST_HEAD(&ptype_base[i]); |
4687 | ||
881d966b EB |
4688 | if (register_pernet_subsys(&netdev_net_ops)) |
4689 | goto out; | |
1da177e4 | 4690 | |
ce286d32 EB |
4691 | if (register_pernet_device(&default_device_ops)) |
4692 | goto out; | |
4693 | ||
1da177e4 LT |
4694 | /* |
4695 | * Initialise the packet receive queues. | |
4696 | */ | |
4697 | ||
6f912042 | 4698 | for_each_possible_cpu(i) { |
1da177e4 LT |
4699 | struct softnet_data *queue; |
4700 | ||
4701 | queue = &per_cpu(softnet_data, i); | |
4702 | skb_queue_head_init(&queue->input_pkt_queue); | |
1da177e4 LT |
4703 | queue->completion_queue = NULL; |
4704 | INIT_LIST_HEAD(&queue->poll_list); | |
bea3348e SH |
4705 | |
4706 | queue->backlog.poll = process_backlog; | |
4707 | queue->backlog.weight = weight_p; | |
1da177e4 LT |
4708 | } |
4709 | ||
db217334 CL |
4710 | netdev_dma_register(); |
4711 | ||
1da177e4 LT |
4712 | dev_boot_phase = 0; |
4713 | ||
4714 | open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); | |
4715 | open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL); | |
4716 | ||
4717 | hotcpu_notifier(dev_cpu_callback, 0); | |
4718 | dst_init(); | |
4719 | dev_mcast_init(); | |
4720 | rc = 0; | |
4721 | out: | |
4722 | return rc; | |
4723 | } | |
4724 | ||
4725 | subsys_initcall(net_dev_init); | |
4726 | ||
4727 | EXPORT_SYMBOL(__dev_get_by_index); | |
4728 | EXPORT_SYMBOL(__dev_get_by_name); | |
4729 | EXPORT_SYMBOL(__dev_remove_pack); | |
c2373ee9 | 4730 | EXPORT_SYMBOL(dev_valid_name); |
1da177e4 LT |
4731 | EXPORT_SYMBOL(dev_add_pack); |
4732 | EXPORT_SYMBOL(dev_alloc_name); | |
4733 | EXPORT_SYMBOL(dev_close); | |
4734 | EXPORT_SYMBOL(dev_get_by_flags); | |
4735 | EXPORT_SYMBOL(dev_get_by_index); | |
4736 | EXPORT_SYMBOL(dev_get_by_name); | |
1da177e4 LT |
4737 | EXPORT_SYMBOL(dev_open); |
4738 | EXPORT_SYMBOL(dev_queue_xmit); | |
4739 | EXPORT_SYMBOL(dev_remove_pack); | |
4740 | EXPORT_SYMBOL(dev_set_allmulti); | |
4741 | EXPORT_SYMBOL(dev_set_promiscuity); | |
4742 | EXPORT_SYMBOL(dev_change_flags); | |
4743 | EXPORT_SYMBOL(dev_set_mtu); | |
4744 | EXPORT_SYMBOL(dev_set_mac_address); | |
4745 | EXPORT_SYMBOL(free_netdev); | |
4746 | EXPORT_SYMBOL(netdev_boot_setup_check); | |
4747 | EXPORT_SYMBOL(netdev_set_master); | |
4748 | EXPORT_SYMBOL(netdev_state_change); | |
4749 | EXPORT_SYMBOL(netif_receive_skb); | |
4750 | EXPORT_SYMBOL(netif_rx); | |
4751 | EXPORT_SYMBOL(register_gifconf); | |
4752 | EXPORT_SYMBOL(register_netdevice); | |
4753 | EXPORT_SYMBOL(register_netdevice_notifier); | |
4754 | EXPORT_SYMBOL(skb_checksum_help); | |
4755 | EXPORT_SYMBOL(synchronize_net); | |
4756 | EXPORT_SYMBOL(unregister_netdevice); | |
4757 | EXPORT_SYMBOL(unregister_netdevice_notifier); | |
4758 | EXPORT_SYMBOL(net_enable_timestamp); | |
4759 | EXPORT_SYMBOL(net_disable_timestamp); | |
4760 | EXPORT_SYMBOL(dev_get_flags); | |
4761 | ||
4762 | #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) | |
4763 | EXPORT_SYMBOL(br_handle_frame_hook); | |
4764 | EXPORT_SYMBOL(br_fdb_get_hook); | |
4765 | EXPORT_SYMBOL(br_fdb_put_hook); | |
4766 | #endif | |
4767 | ||
4768 | #ifdef CONFIG_KMOD | |
4769 | EXPORT_SYMBOL(dev_load); | |
4770 | #endif | |
4771 | ||
4772 | EXPORT_PER_CPU_SYMBOL(softnet_data); |