/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if protocol handler, mangling packet, will
 * be the first on list, it is not able to sense, that packet
 * is cloned and should be copied-on-write, so that it will
 * change it and subsequent readers will get broken packet.
 * --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new packet
 *	type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
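
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * tap registered with dev_add_pack() that counts every received packet.
 * The handler and counter names below are hypothetical; only the
 * packet_type fields and dev_add_pack()/dev_remove_pack() come from this
 * API.
 *
 *	static unsigned long example_pkt_count;
 *
 *	static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *				   struct packet_type *pt,
 *				   struct net_device *orig_dev)
 *	{
 *		example_pkt_count++;
 *		kfree_skb(skb);		// the tap owns the reference it was given
 *		return 0;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// see ptype_head(): lands on ptype_all
 *		.func = example_tap_rcv,
 *	};
 *
 *	// dev_add_pack(&example_tap);  ...  dev_remove_pack(&example_tap);
 */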

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
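
/*
 * Example (illustrative, not part of the original file): the handler above
 * parses kernel command-line entries of the form
 * "netdev=irq,base_addr,mem_start,mem_end,name".  A hypothetical
 * "netdev=5,0x340,0,0,eth1" would record IRQ 5 and I/O base 0x340, to be
 * picked up later by a driver calling netdev_boot_setup_check() for a
 * device named "eth1".
 */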

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
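
/*
 * Example (illustrative, not part of the original file): looking up a device
 * by name from process context.  The device name "eth0" is hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);		// drop the reference taken above
 *	}
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() can be used instead; no
 * dev_put() is needed then, but the pointer is only valid inside the RCU
 * read-side section.
 */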

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
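
/*
 * Example (illustrative, not part of the original file): finding an Ethernet
 * device by MAC address under RCU.  The address bytes are hypothetical.
 *
 *	static const char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		;	// only valid in this RCU section unless dev_hold() is taken
 *	rcu_read_unlock();
 */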

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
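
/*
 * Example (illustrative): dev_valid_name("eth0") and dev_valid_name("my-tap")
 * return true, while dev_valid_name(""), dev_valid_name(".."),
 * dev_valid_name("a/b"), dev_valid_name("a:b"), any name containing
 * whitespace, and any name of IFNAMSIZ (16) or more characters return false.
 */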

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
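
/*
 * Example (illustrative, not part of the original file): a driver asking for
 * the usual "eth%d" style numbering before registering its device.  The
 * error label is hypothetical and up to the caller.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto out_free;	// hypothetical error path
 *	// dev->name is now e.g. "eth0", the first unused unit number
 */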

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
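
/*
 * Example (illustrative, not part of the original file): renaming an
 * interface from process context.  The device must be down and the RTNL
 * lock held; "dev" and the target name pattern are hypothetical.
 *
 *	rtnl_lock();
 *	if (!(dev->flags & IFF_UP))
 *		err = dev_change_name(dev, "lan%d");	// picks e.g. "lan0"
 *	rtnl_unlock();
 */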

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
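
/*
 * Example (illustrative, not part of the original file): bringing an
 * interface up from kernel code, roughly what "ip link set dev eth0 up"
 * ends up doing.  The lookup and the device name are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);	// later: dev_close(dev) to shut it down
 *	rtnl_unlock();
 */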

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can even be on a different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
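
/*
 * Example (illustrative, not part of the original file): a minimal notifier
 * that logs NETDEV_UP events.  The names below are hypothetical; the API
 * used is register_netdevice_notifier()/unregister_netdevice_notifier().
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "interface is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	// register_netdevice_notifier(&example_netdev_nb);
 *	// ... unregister_netdevice_notifier(&example_netdev_nb);
 */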

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 1661
1cf51900 1662#ifdef CONFIG_NET_INGRESS
4577139b
DB
1663static struct static_key ingress_needed __read_mostly;
1664
1665void net_inc_ingress_queue(void)
1666{
1667 static_key_slow_inc(&ingress_needed);
1668}
1669EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1670
1671void net_dec_ingress_queue(void)
1672{
1673 static_key_slow_dec(&ingress_needed);
1674}
1675EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1676#endif
1677
c5905afb 1678static struct static_key netstamp_needed __read_mostly;
b90e5794 1679#ifdef HAVE_JUMP_LABEL
c5905afb 1680/* We are not allowed to call static_key_slow_dec() from irq context
b90e5794 1681 * If net_disable_timestamp() is called from irq context, defer the
c5905afb 1682 * static_key_slow_dec() calls.
b90e5794
ED
1683 */
1684static atomic_t netstamp_needed_deferred;
1685#endif
1da177e4
LT
1686
1687void net_enable_timestamp(void)
1688{
b90e5794
ED
1689#ifdef HAVE_JUMP_LABEL
1690 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1691
1692 if (deferred) {
1693 while (--deferred)
c5905afb 1694 static_key_slow_dec(&netstamp_needed);
b90e5794
ED
1695 return;
1696 }
1697#endif
c5905afb 1698 static_key_slow_inc(&netstamp_needed);
1da177e4 1699}
d1b19dff 1700EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
1701
1702void net_disable_timestamp(void)
1703{
b90e5794
ED
1704#ifdef HAVE_JUMP_LABEL
1705 if (in_interrupt()) {
1706 atomic_inc(&netstamp_needed_deferred);
1707 return;
1708 }
1709#endif
c5905afb 1710 static_key_slow_dec(&netstamp_needed);
1da177e4 1711}
d1b19dff 1712EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1713
3b098e2d 1714static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1715{
588f0330 1716 skb->tstamp.tv64 = 0;
c5905afb 1717 if (static_key_false(&netstamp_needed))
a61bbcf2 1718 __net_timestamp(skb);
1da177e4
LT
1719}
1720
588f0330 1721#define net_timestamp_check(COND, SKB) \
c5905afb 1722 if (static_key_false(&netstamp_needed)) { \
588f0330
ED
1723 if ((COND) && !(SKB)->tstamp.tv64) \
1724 __net_timestamp(SKB); \
1725 } \
3b098e2d 1726
1ee481fb 1727bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
79b569f0
DL
1728{
1729 unsigned int len;
1730
1731 if (!(dev->flags & IFF_UP))
1732 return false;
1733
1734 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1735 if (skb->len <= len)
1736 return true;
1737
1738 /* if TSO is enabled, we don't care about the length as the packet
1739 * may be forwarded without being segmented first
1740 */
1741 if (skb_is_gso(skb))
1742 return true;
1743
1744 return false;
1745}
1ee481fb 1746EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1747
a0265d28
HX
1748int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1749{
bbbf2df0
WB
1750 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1751 unlikely(!is_skb_forwardable(dev, skb))) {
a0265d28
HX
1752 atomic_long_inc(&dev->rx_dropped);
1753 kfree_skb(skb);
1754 return NET_RX_DROP;
1755 }
1756
1757 skb_scrub_packet(skb, true);
08b4b8ea 1758 skb->priority = 0;
a0265d28 1759 skb->protocol = eth_type_trans(skb, dev);
2c26d34b 1760 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
a0265d28
HX
1761
1762 return 0;
1763}
1764EXPORT_SYMBOL_GPL(__dev_forward_skb);
1765
44540960
AB
1766/**
1767 * dev_forward_skb - loopback an skb to another netif
1768 *
1769 * @dev: destination network device
1770 * @skb: buffer to forward
1771 *
1772 * return values:
1773 * NET_RX_SUCCESS (no congestion)
6ec82562 1774 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1775 *
1776 * dev_forward_skb can be used for injecting an skb from the
1777 * start_xmit function of one device into the receive queue
1778 * of another device.
1779 *
1780 * The receiving device may be in another namespace, so
1781 * we have to clear all information in the skb that could
1782 * impact namespace isolation.
1783 */
1784int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1785{
a0265d28 1786 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1787}
1788EXPORT_SYMBOL_GPL(dev_forward_skb);
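An illustrative sketch (assumed veth-like pairing, not from this file) of the usage pattern the comment above describes: handing a frame from one device's ndo_start_xmit to its peer's receive path. The private structure and field are hypothetical.

	struct example_priv {
		struct net_device *peer;	/* hypothetical peer device */
	};

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		/* dev_forward_skb() consumes the skb whatever the outcome */
		if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
			dev->stats.tx_packets++;
		else
			dev->stats.tx_dropped++;

		return NETDEV_TX_OK;
	}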
1789
71d9dec2
CG
1790static inline int deliver_skb(struct sk_buff *skb,
1791 struct packet_type *pt_prev,
1792 struct net_device *orig_dev)
1793{
1080e512
MT
1794 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1795 return -ENOMEM;
71d9dec2
CG
1796 atomic_inc(&skb->users);
1797 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1798}
1799
7866a621
SN
1800static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1801 struct packet_type **pt,
fbcb2170
JP
1802 struct net_device *orig_dev,
1803 __be16 type,
7866a621
SN
1804 struct list_head *ptype_list)
1805{
1806 struct packet_type *ptype, *pt_prev = *pt;
1807
1808 list_for_each_entry_rcu(ptype, ptype_list, list) {
1809 if (ptype->type != type)
1810 continue;
1811 if (pt_prev)
fbcb2170 1812 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
1813 pt_prev = ptype;
1814 }
1815 *pt = pt_prev;
1816}
1817
c0de08d0
EL
1818static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1819{
a3d744e9 1820 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1821 return false;
1822
1823 if (ptype->id_match)
1824 return ptype->id_match(ptype, skb->sk);
1825 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1826 return true;
1827
1828 return false;
1829}
1830
1da177e4
LT
1831/*
1832 * Support routine. Sends outgoing frames to any network
1833 * taps currently in use.
1834 */
1835
f6a78bfc 1836static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1837{
1838 struct packet_type *ptype;
71d9dec2
CG
1839 struct sk_buff *skb2 = NULL;
1840 struct packet_type *pt_prev = NULL;
7866a621 1841 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1842
1da177e4 1843 rcu_read_lock();
7866a621
SN
1844again:
1845 list_for_each_entry_rcu(ptype, ptype_list, list) {
1da177e4
LT
1846 /* Never send packets back to the socket
1847 * they originated from - MvS (miquels@drinkel.ow.org)
1848 */
7866a621
SN
1849 if (skb_loop_sk(ptype, skb))
1850 continue;
71d9dec2 1851
7866a621
SN
1852 if (pt_prev) {
1853 deliver_skb(skb2, pt_prev, skb->dev);
1854 pt_prev = ptype;
1855 continue;
1856 }
1da177e4 1857
7866a621
SN
1858 /* need to clone skb, done only once */
1859 skb2 = skb_clone(skb, GFP_ATOMIC);
1860 if (!skb2)
1861 goto out_unlock;
70978182 1862
7866a621 1863 net_timestamp_set(skb2);
1da177e4 1864
7866a621
SN
1865 /* skb->nh should be correctly
1866 * set by sender, so that the second statement is
1867 * just protection against buggy protocols.
1868 */
1869 skb_reset_mac_header(skb2);
1870
1871 if (skb_network_header(skb2) < skb2->data ||
1872 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1873 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1874 ntohs(skb2->protocol),
1875 dev->name);
1876 skb_reset_network_header(skb2);
1da177e4 1877 }
7866a621
SN
1878
1879 skb2->transport_header = skb2->network_header;
1880 skb2->pkt_type = PACKET_OUTGOING;
1881 pt_prev = ptype;
1882 }
1883
1884 if (ptype_list == &ptype_all) {
1885 ptype_list = &dev->ptype_all;
1886 goto again;
1da177e4 1887 }
7866a621 1888out_unlock:
71d9dec2
CG
1889 if (pt_prev)
1890 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1da177e4
LT
1891 rcu_read_unlock();
1892}
1893
2c53040f
BH
1894/**
1895 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1896 * @dev: Network device
1897 * @txq: number of queues available
1898 *
1899 * If real_num_tx_queues is changed the tc mappings may no longer be
1900 * valid. To resolve this verify the tc mapping remains valid and, if
1901 * not, NULL the mapping. With no priorities mapping to this
1902 * offset/count pair it will no longer be used. In the worst case, if
1903 * TC0 is invalid nothing can be done, so disable priority mappings.
1904 * It is expected that drivers will fix this mapping if they can
1905 * before calling netif_set_real_num_tx_queues.
1906 */
bb134d22 1907static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1908{
1909 int i;
1910 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1911
1912 /* If TC0 is invalidated disable TC mapping */
1913 if (tc->offset + tc->count > txq) {
7b6cd1ce 1914 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1915 dev->num_tc = 0;
1916 return;
1917 }
1918
1919 /* Invalidated prio to tc mappings set to TC0 */
1920 for (i = 1; i < TC_BITMASK + 1; i++) {
1921 int q = netdev_get_prio_tc_map(dev, i);
1922
1923 tc = &dev->tc_to_txq[q];
1924 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
1925 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1926 i, q);
4f57c087
JF
1927 netdev_set_prio_tc_map(dev, i, 0);
1928 }
1929 }
1930}
1931
537c00de
AD
1932#ifdef CONFIG_XPS
1933static DEFINE_MUTEX(xps_map_mutex);
1934#define xmap_dereference(P) \
1935 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1936
10cdc3f3
AD
1937static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1938 int cpu, u16 index)
537c00de 1939{
10cdc3f3
AD
1940 struct xps_map *map = NULL;
1941 int pos;
537c00de 1942
10cdc3f3
AD
1943 if (dev_maps)
1944 map = xmap_dereference(dev_maps->cpu_map[cpu]);
537c00de 1945
10cdc3f3
AD
1946 for (pos = 0; map && pos < map->len; pos++) {
1947 if (map->queues[pos] == index) {
537c00de
AD
1948 if (map->len > 1) {
1949 map->queues[pos] = map->queues[--map->len];
1950 } else {
10cdc3f3 1951 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
537c00de
AD
1952 kfree_rcu(map, rcu);
1953 map = NULL;
1954 }
10cdc3f3 1955 break;
537c00de 1956 }
537c00de
AD
1957 }
1958
10cdc3f3
AD
1959 return map;
1960}
1961
024e9679 1962static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
10cdc3f3
AD
1963{
1964 struct xps_dev_maps *dev_maps;
024e9679 1965 int cpu, i;
10cdc3f3
AD
1966 bool active = false;
1967
1968 mutex_lock(&xps_map_mutex);
1969 dev_maps = xmap_dereference(dev->xps_maps);
1970
1971 if (!dev_maps)
1972 goto out_no_maps;
1973
1974 for_each_possible_cpu(cpu) {
024e9679
AD
1975 for (i = index; i < dev->num_tx_queues; i++) {
1976 if (!remove_xps_queue(dev_maps, cpu, i))
1977 break;
1978 }
1979 if (i == dev->num_tx_queues)
10cdc3f3
AD
1980 active = true;
1981 }
1982
1983 if (!active) {
537c00de
AD
1984 RCU_INIT_POINTER(dev->xps_maps, NULL);
1985 kfree_rcu(dev_maps, rcu);
1986 }
1987
024e9679
AD
1988 for (i = index; i < dev->num_tx_queues; i++)
1989 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1990 NUMA_NO_NODE);
1991
537c00de
AD
1992out_no_maps:
1993 mutex_unlock(&xps_map_mutex);
1994}
1995
01c5f864
AD
1996static struct xps_map *expand_xps_map(struct xps_map *map,
1997 int cpu, u16 index)
1998{
1999 struct xps_map *new_map;
2000 int alloc_len = XPS_MIN_MAP_ALLOC;
2001 int i, pos;
2002
2003 for (pos = 0; map && pos < map->len; pos++) {
2004 if (map->queues[pos] != index)
2005 continue;
2006 return map;
2007 }
2008
2009 /* Need to add queue to this CPU's existing map */
2010 if (map) {
2011 if (pos < map->alloc_len)
2012 return map;
2013
2014 alloc_len = map->alloc_len * 2;
2015 }
2016
2017 /* Need to allocate a new map to store this queue on this CPU's map */
2018 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2019 cpu_to_node(cpu));
2020 if (!new_map)
2021 return NULL;
2022
2023 for (i = 0; i < pos; i++)
2024 new_map->queues[i] = map->queues[i];
2025 new_map->alloc_len = alloc_len;
2026 new_map->len = pos;
2027
2028 return new_map;
2029}
2030
3573540c
MT
2031int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2032 u16 index)
537c00de 2033{
01c5f864 2034 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
537c00de 2035 struct xps_map *map, *new_map;
537c00de 2036 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
01c5f864
AD
2037 int cpu, numa_node_id = -2;
2038 bool active = false;
537c00de
AD
2039
2040 mutex_lock(&xps_map_mutex);
2041
2042 dev_maps = xmap_dereference(dev->xps_maps);
2043
01c5f864
AD
2044 /* allocate memory for queue storage */
2045 for_each_online_cpu(cpu) {
2046 if (!cpumask_test_cpu(cpu, mask))
2047 continue;
2048
2049 if (!new_dev_maps)
2050 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2051 if (!new_dev_maps) {
2052 mutex_unlock(&xps_map_mutex);
01c5f864 2053 return -ENOMEM;
2bb60cb9 2054 }
01c5f864
AD
2055
2056 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2057 NULL;
2058
2059 map = expand_xps_map(map, cpu, index);
2060 if (!map)
2061 goto error;
2062
2063 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2064 }
2065
2066 if (!new_dev_maps)
2067 goto out_no_new_maps;
2068
537c00de 2069 for_each_possible_cpu(cpu) {
01c5f864
AD
2070 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2071 /* add queue to CPU maps */
2072 int pos = 0;
2073
2074 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2075 while ((pos < map->len) && (map->queues[pos] != index))
2076 pos++;
2077
2078 if (pos == map->len)
2079 map->queues[map->len++] = index;
537c00de 2080#ifdef CONFIG_NUMA
537c00de
AD
2081 if (numa_node_id == -2)
2082 numa_node_id = cpu_to_node(cpu);
2083 else if (numa_node_id != cpu_to_node(cpu))
2084 numa_node_id = -1;
537c00de 2085#endif
01c5f864
AD
2086 } else if (dev_maps) {
2087 /* fill in the new device map from the old device map */
2088 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2089 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
537c00de 2090 }
01c5f864 2091
537c00de
AD
2092 }
2093
01c5f864
AD
2094 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2095
537c00de 2096 /* Cleanup old maps */
01c5f864
AD
2097 if (dev_maps) {
2098 for_each_possible_cpu(cpu) {
2099 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2100 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2101 if (map && map != new_map)
2102 kfree_rcu(map, rcu);
2103 }
537c00de 2104
01c5f864 2105 kfree_rcu(dev_maps, rcu);
537c00de
AD
2106 }
2107
01c5f864
AD
2108 dev_maps = new_dev_maps;
2109 active = true;
537c00de 2110
01c5f864
AD
2111out_no_new_maps:
2112 /* update Tx queue numa node */
537c00de
AD
2113 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2114 (numa_node_id >= 0) ? numa_node_id :
2115 NUMA_NO_NODE);
2116
01c5f864
AD
2117 if (!dev_maps)
2118 goto out_no_maps;
2119
2120 /* removes queue from unused CPUs */
2121 for_each_possible_cpu(cpu) {
2122 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2123 continue;
2124
2125 if (remove_xps_queue(dev_maps, cpu, index))
2126 active = true;
2127 }
2128
2129 /* free map if not active */
2130 if (!active) {
2131 RCU_INIT_POINTER(dev->xps_maps, NULL);
2132 kfree_rcu(dev_maps, rcu);
2133 }
2134
2135out_no_maps:
537c00de
AD
2136 mutex_unlock(&xps_map_mutex);
2137
2138 return 0;
2139error:
01c5f864
AD
2140 /* remove any maps that we added */
2141 for_each_possible_cpu(cpu) {
2142 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2143 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2144 NULL;
2145 if (new_map && new_map != map)
2146 kfree(new_map);
2147 }
2148
537c00de
AD
2149 mutex_unlock(&xps_map_mutex);
2150
537c00de
AD
2151 kfree(new_dev_maps);
2152 return -ENOMEM;
2153}
2154EXPORT_SYMBOL(netif_set_xps_queue);
2155
2156#endif
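A hedged, driver-side example (not part of this file) of how netif_set_xps_queue() is commonly invoked, pinning one TX queue per online CPU; the helper name and the one-queue-per-CPU layout are assumptions for the sketch.

	static void example_setup_xps(struct net_device *dev)
	{
		int cpu;
		u16 qidx = 0;

		for_each_online_cpu(cpu) {
			if (qidx >= dev->real_num_tx_queues)
				break;
			/* map queue qidx to the single CPU "cpu" */
			if (netif_set_xps_queue(dev, cpumask_of(cpu), qidx))
				netdev_warn(dev, "XPS setup failed for queue %u\n",
					    qidx);
			qidx++;
		}
	}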
f0796d5c
JF
2157/*
2158 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2159 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2160 */
e6484930 2161int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2162{
1d24eb48
TH
2163 int rc;
2164
e6484930
TH
2165 if (txq < 1 || txq > dev->num_tx_queues)
2166 return -EINVAL;
f0796d5c 2167
5c56580b
BH
2168 if (dev->reg_state == NETREG_REGISTERED ||
2169 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2170 ASSERT_RTNL();
2171
1d24eb48
TH
2172 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2173 txq);
bf264145
TH
2174 if (rc)
2175 return rc;
2176
4f57c087
JF
2177 if (dev->num_tc)
2178 netif_setup_tc(dev, txq);
2179
024e9679 2180 if (txq < dev->real_num_tx_queues) {
e6484930 2181 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2182#ifdef CONFIG_XPS
2183 netif_reset_xps_queues_gt(dev, txq);
2184#endif
2185 }
f0796d5c 2186 }
e6484930
TH
2187
2188 dev->real_num_tx_queues = txq;
2189 return 0;
f0796d5c
JF
2190}
2191EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 2192
a953be53 2193#ifdef CONFIG_SYSFS
62fe0b40
BH
2194/**
2195 * netif_set_real_num_rx_queues - set actual number of RX queues used
2196 * @dev: Network device
2197 * @rxq: Actual number of RX queues
2198 *
2199 * This must be called either with the rtnl_lock held or before
2200 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2201 * negative error code. If called before registration, it always
2202 * succeeds.
62fe0b40
BH
2203 */
2204int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2205{
2206 int rc;
2207
bd25fa7b
TH
2208 if (rxq < 1 || rxq > dev->num_rx_queues)
2209 return -EINVAL;
2210
62fe0b40
BH
2211 if (dev->reg_state == NETREG_REGISTERED) {
2212 ASSERT_RTNL();
2213
62fe0b40
BH
2214 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2215 rxq);
2216 if (rc)
2217 return rc;
62fe0b40
BH
2218 }
2219
2220 dev->real_num_rx_queues = rxq;
2221 return 0;
2222}
2223EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2224#endif
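For orientation, a sketch (hypothetical driver callback, not from dev.c) of how the two setters above are commonly paired when channel counts change, for example from an ethtool request.

	static int example_set_channels(struct net_device *dev,
					unsigned int new_txq,
					unsigned int new_rxq)
	{
		int err;

		ASSERT_RTNL();	/* both setters expect the rtnl lock here */

		err = netif_set_real_num_tx_queues(dev, new_txq);
		if (err)
			return err;

		return netif_set_real_num_rx_queues(dev, new_rxq);
	}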
2225
2c53040f
BH
2226/**
2227 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2228 *
2229 * This routine should set an upper limit on the number of RSS queues
2230 * used by default by multiqueue devices.
2231 */
a55b138b 2232int netif_get_num_default_rss_queues(void)
16917b87
YM
2233{
2234 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2235}
2236EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2237
def82a1d 2238static inline void __netif_reschedule(struct Qdisc *q)
56079431 2239{
def82a1d
JP
2240 struct softnet_data *sd;
2241 unsigned long flags;
56079431 2242
def82a1d 2243 local_irq_save(flags);
903ceff7 2244 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2245 q->next_sched = NULL;
2246 *sd->output_queue_tailp = q;
2247 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2248 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2249 local_irq_restore(flags);
2250}
2251
2252void __netif_schedule(struct Qdisc *q)
2253{
2254 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2255 __netif_reschedule(q);
56079431
DV
2256}
2257EXPORT_SYMBOL(__netif_schedule);
2258
e6247027
ED
2259struct dev_kfree_skb_cb {
2260 enum skb_free_reason reason;
2261};
2262
2263static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2264{
e6247027
ED
2265 return (struct dev_kfree_skb_cb *)skb->cb;
2266}
2267
46e5da40
JF
2268void netif_schedule_queue(struct netdev_queue *txq)
2269{
2270 rcu_read_lock();
2271 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2272 struct Qdisc *q = rcu_dereference(txq->qdisc);
2273
2274 __netif_schedule(q);
2275 }
2276 rcu_read_unlock();
2277}
2278EXPORT_SYMBOL(netif_schedule_queue);
2279
2280/**
2281 * netif_wake_subqueue - allow sending packets on subqueue
2282 * @dev: network device
2283 * @queue_index: sub queue index
2284 *
2285 * Resume individual transmit queue of a device with multiple transmit queues.
2286 */
2287void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2288{
2289 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2290
2291 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2292 struct Qdisc *q;
2293
2294 rcu_read_lock();
2295 q = rcu_dereference(txq->qdisc);
2296 __netif_schedule(q);
2297 rcu_read_unlock();
2298 }
2299}
2300EXPORT_SYMBOL(netif_wake_subqueue);
2301
2302void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2303{
2304 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2305 struct Qdisc *q;
2306
2307 rcu_read_lock();
2308 q = rcu_dereference(dev_queue->qdisc);
2309 __netif_schedule(q);
2310 rcu_read_unlock();
2311 }
2312}
2313EXPORT_SYMBOL(netif_tx_wake_queue);
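A sketch of the usual flow-control counterpart to the wake helpers above: a driver's TX completion path re-enabling a queue it stopped earlier when the ring filled up. The free-slot threshold is an assumption for the example, not a rule from this file.

	static void example_tx_complete(struct net_device *dev, u16 qidx,
					unsigned int free_slots)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

		/* wake only once there is room for a worst-case packet */
		if (netif_tx_queue_stopped(txq) &&
		    free_slots > MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(txq);
	}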
2314
e6247027 2315void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2316{
e6247027 2317 unsigned long flags;
56079431 2318
e6247027
ED
2319 if (likely(atomic_read(&skb->users) == 1)) {
2320 smp_rmb();
2321 atomic_set(&skb->users, 0);
2322 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2323 return;
bea3348e 2324 }
e6247027
ED
2325 get_kfree_skb_cb(skb)->reason = reason;
2326 local_irq_save(flags);
2327 skb->next = __this_cpu_read(softnet_data.completion_queue);
2328 __this_cpu_write(softnet_data.completion_queue, skb);
2329 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2330 local_irq_restore(flags);
56079431 2331}
e6247027 2332EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2333
e6247027 2334void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2335{
2336 if (in_irq() || irqs_disabled())
e6247027 2337 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2338 else
2339 dev_kfree_skb(skb);
2340}
e6247027 2341EXPORT_SYMBOL(__dev_kfree_skb_any);
56079431
DV
2342
2343
bea3348e
SH
2344/**
2345 * netif_device_detach - mark device as removed
2346 * @dev: network device
2347 *
2348 * Mark device as removed from system and therefore no longer available.
2349 */
56079431
DV
2350void netif_device_detach(struct net_device *dev)
2351{
2352 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2353 netif_running(dev)) {
d543103a 2354 netif_tx_stop_all_queues(dev);
56079431
DV
2355 }
2356}
2357EXPORT_SYMBOL(netif_device_detach);
2358
bea3348e
SH
2359/**
2360 * netif_device_attach - mark device as attached
2361 * @dev: network device
2362 *
2363 * Mark device as attached to the system and restart it if needed.
2364 */
56079431
DV
2365void netif_device_attach(struct net_device *dev)
2366{
2367 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2368 netif_running(dev)) {
d543103a 2369 netif_tx_wake_all_queues(dev);
4ec93edb 2370 __netdev_watchdog_up(dev);
56079431
DV
2371 }
2372}
2373EXPORT_SYMBOL(netif_device_attach);
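The detach/attach pair above is typically used around power transitions; here is a hedged sketch with hypothetical suspend/resume callbacks (hardware handling elided).

	static int example_suspend(struct device *d)
	{
		struct net_device *dev = dev_get_drvdata(d);

		netif_device_detach(dev);
		/* ... stop DMA, save state, power the hardware down ... */
		return 0;
	}

	static int example_resume(struct device *d)
	{
		struct net_device *dev = dev_get_drvdata(d);

		/* ... power up, restore state, restart DMA ... */
		netif_device_attach(dev);
		return 0;
	}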
2374
5605c762
JP
2375/*
2376 * Returns a Tx hash based on the given packet descriptor and a Tx queues' number
2377 * to be used as a distribution range.
2378 */
2379u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2380 unsigned int num_tx_queues)
2381{
2382 u32 hash;
2383 u16 qoffset = 0;
2384 u16 qcount = num_tx_queues;
2385
2386 if (skb_rx_queue_recorded(skb)) {
2387 hash = skb_get_rx_queue(skb);
2388 while (unlikely(hash >= num_tx_queues))
2389 hash -= num_tx_queues;
2390 return hash;
2391 }
2392
2393 if (dev->num_tc) {
2394 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2395 qoffset = dev->tc_to_txq[tc].offset;
2396 qcount = dev->tc_to_txq[tc].count;
2397 }
2398
2399 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2400}
2401EXPORT_SYMBOL(__skb_tx_hash);
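A brief note on the return statement above: reciprocal_scale() maps a 32-bit hash onto [0, qcount) without a division, roughly as sketched below (illustrative arithmetic only, not a new definition).

	/* index = ((u64)hash * qcount) >> 32
	 * e.g. hash = 0xC0000000, qcount = 8  ->  index = 6,
	 * which is then shifted into the traffic class with "+ qoffset".
	 */
	static inline u32 example_scale(u32 hash, u32 qcount)
	{
		return (u32)(((u64)hash * qcount) >> 32);
	}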
2402
36c92474
BH
2403static void skb_warn_bad_offload(const struct sk_buff *skb)
2404{
65e9d2fa 2405 static const netdev_features_t null_features = 0;
36c92474 2406 struct net_device *dev = skb->dev;
88ad4175 2407 const char *name = "";
36c92474 2408
c846ad9b
BG
2409 if (!net_ratelimit())
2410 return;
2411
88ad4175
BM
2412 if (dev) {
2413 if (dev->dev.parent)
2414 name = dev_driver_string(dev->dev.parent);
2415 else
2416 name = netdev_name(dev);
2417 }
36c92474
BH
2418 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2419 "gso_type=%d ip_summed=%d\n",
88ad4175 2420 name, dev ? &dev->features : &null_features,
65e9d2fa 2421 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2422 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2423 skb_shinfo(skb)->gso_type, skb->ip_summed);
2424}
2425
1da177e4
LT
2426/*
2427 * Invalidate hardware checksum when packet is to be mangled, and
2428 * complete checksum manually on outgoing path.
2429 */
84fa7933 2430int skb_checksum_help(struct sk_buff *skb)
1da177e4 2431{
d3bc23e7 2432 __wsum csum;
663ead3b 2433 int ret = 0, offset;
1da177e4 2434
84fa7933 2435 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2436 goto out_set_summed;
2437
2438 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2439 skb_warn_bad_offload(skb);
2440 return -EINVAL;
1da177e4
LT
2441 }
2442
cef401de
ED
2443 /* Before computing a checksum, we should make sure no frag could
2444 * be modified by an external entity : checksum could be wrong.
2445 */
2446 if (skb_has_shared_frag(skb)) {
2447 ret = __skb_linearize(skb);
2448 if (ret)
2449 goto out;
2450 }
2451
55508d60 2452 offset = skb_checksum_start_offset(skb);
a030847e
HX
2453 BUG_ON(offset >= skb_headlen(skb));
2454 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2455
2456 offset += skb->csum_offset;
2457 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2458
2459 if (skb_cloned(skb) &&
2460 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2461 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2462 if (ret)
2463 goto out;
2464 }
2465
a030847e 2466 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 2467out_set_summed:
1da177e4 2468 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2469out:
1da177e4
LT
2470 return ret;
2471}
d1b19dff 2472EXPORT_SYMBOL(skb_checksum_help);
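As an illustration of the helper above (a driver-side sketch; the offload test is a stand-in for device-specific logic): falling back to software checksumming when the hardware cannot finish a CHECKSUM_PARTIAL packet.

	static int example_tx_csum(struct sk_buff *skb, bool hw_can_offload)
	{
		if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_can_offload)
			return skb_checksum_help(skb);	/* 0 or -errno */
		return 0;
	}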
1da177e4 2473
53d6471c 2474__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2475{
252e3346 2476 __be16 type = skb->protocol;
f6a78bfc 2477
19acc327
PS
2478 /* Tunnel gso handlers can set protocol to ethernet. */
2479 if (type == htons(ETH_P_TEB)) {
2480 struct ethhdr *eth;
2481
2482 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2483 return 0;
2484
2485 eth = (struct ethhdr *)skb_mac_header(skb);
2486 type = eth->h_proto;
2487 }
2488
d4bcef3f 2489 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
2490}
2491
2492/**
2493 * skb_mac_gso_segment - mac layer segmentation handler.
2494 * @skb: buffer to segment
2495 * @features: features for the output path (see dev->features)
2496 */
2497struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2498 netdev_features_t features)
2499{
2500 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2501 struct packet_offload *ptype;
53d6471c
VY
2502 int vlan_depth = skb->mac_len;
2503 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
2504
2505 if (unlikely(!type))
2506 return ERR_PTR(-EINVAL);
2507
53d6471c 2508 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
2509
2510 rcu_read_lock();
22061d80 2511 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2512 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2513 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2514 break;
2515 }
2516 }
2517 rcu_read_unlock();
2518
98e399f8 2519 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2520
f6a78bfc
HX
2521 return segs;
2522}
05e8ef4a
PS
2523EXPORT_SYMBOL(skb_mac_gso_segment);
2524
2525
2526/* Open vSwitch calls this on the rx path, so we need a different check.
2527 */
2528static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2529{
2530 if (tx_path)
2531 return skb->ip_summed != CHECKSUM_PARTIAL;
2532 else
2533 return skb->ip_summed == CHECKSUM_NONE;
2534}
2535
2536/**
2537 * __skb_gso_segment - Perform segmentation on skb.
2538 * @skb: buffer to segment
2539 * @features: features for the output path (see dev->features)
2540 * @tx_path: whether it is called in TX path
2541 *
2542 * This function segments the given skb and returns a list of segments.
2543 *
2544 * It may return NULL if the skb requires no segmentation. This is
2545 * only possible when GSO is used for verifying header integrity.
2546 */
2547struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2548 netdev_features_t features, bool tx_path)
2549{
2550 if (unlikely(skb_needs_check(skb, tx_path))) {
2551 int err;
2552
2553 skb_warn_bad_offload(skb);
2554
a40e0a66 2555 err = skb_cow_head(skb, 0);
2556 if (err < 0)
05e8ef4a
PS
2557 return ERR_PTR(err);
2558 }
2559
68c33163 2560 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2561 SKB_GSO_CB(skb)->encap_level = 0;
2562
05e8ef4a
PS
2563 skb_reset_mac_header(skb);
2564 skb_reset_mac_len(skb);
2565
2566 return skb_mac_gso_segment(skb, features);
2567}
12b0004d 2568EXPORT_SYMBOL(__skb_gso_segment);
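A hedged sketch of consuming the segment list produced by skb_gso_segment() (the inline wrapper that calls __skb_gso_segment() with tx_path true); example_hw_xmit() stands in for a driver's real transmit routine and is purely hypothetical.

	static int example_hw_xmit(struct sk_buff *skb, struct net_device *dev);

	static int example_sw_gso(struct sk_buff *skb, struct net_device *dev,
				  netdev_features_t features)
	{
		struct sk_buff *segs, *next;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs))
			return PTR_ERR(segs);
		if (!segs)			/* no segmentation was needed */
			return example_hw_xmit(skb, dev);

		consume_skb(skb);		/* original skb is no longer needed */
		while (segs) {
			next = segs->next;
			segs->next = NULL;
			example_hw_xmit(segs, dev);
			segs = next;
		}
		return 0;
	}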
f6a78bfc 2569
fb286bb2
HX
2570/* Take action when hardware reception checksum errors are detected. */
2571#ifdef CONFIG_BUG
2572void netdev_rx_csum_fault(struct net_device *dev)
2573{
2574 if (net_ratelimit()) {
7b6cd1ce 2575 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2576 dump_stack();
2577 }
2578}
2579EXPORT_SYMBOL(netdev_rx_csum_fault);
2580#endif
2581
1da177e4
LT
2582 /* Actually, we should eliminate this check as soon as we know that:
2583 * 1. IOMMU is present and allows mapping all the memory.
2584 * 2. No high memory really exists on this machine.
2585 */
2586
c1e756bf 2587static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2588{
3d3a8533 2589#ifdef CONFIG_HIGHMEM
1da177e4 2590 int i;
5acbbd42 2591 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2593 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2594 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2595 return 1;
ea2ab693 2596 }
5acbbd42 2597 }
1da177e4 2598
5acbbd42
FT
2599 if (PCI_DMA_BUS_IS_PHYS) {
2600 struct device *pdev = dev->dev.parent;
1da177e4 2601
9092c658
ED
2602 if (!pdev)
2603 return 0;
5acbbd42 2604 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2605 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2606 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
5acbbd42
FT
2607 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2608 return 1;
2609 }
2610 }
3d3a8533 2611#endif
1da177e4
LT
2612 return 0;
2613}
1da177e4 2614
3b392ddb
SH
2615/* If MPLS offload request, verify we are testing hardware MPLS features
2616 * instead of standard features for the netdev.
2617 */
d0edc7bf 2618#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
2619static netdev_features_t net_mpls_features(struct sk_buff *skb,
2620 netdev_features_t features,
2621 __be16 type)
2622{
25cd9ba0 2623 if (eth_p_mpls(type))
3b392ddb
SH
2624 features &= skb->dev->mpls_features;
2625
2626 return features;
2627}
2628#else
2629static netdev_features_t net_mpls_features(struct sk_buff *skb,
2630 netdev_features_t features,
2631 __be16 type)
2632{
2633 return features;
2634}
2635#endif
2636
c8f44aff 2637static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2638 netdev_features_t features)
f01a5236 2639{
53d6471c 2640 int tmp;
3b392ddb
SH
2641 __be16 type;
2642
2643 type = skb_network_protocol(skb, &tmp);
2644 features = net_mpls_features(skb, features, type);
53d6471c 2645
c0d680e5 2646 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2647 !can_checksum_protocol(features, type)) {
f01a5236 2648 features &= ~NETIF_F_ALL_CSUM;
c1e756bf 2649 } else if (illegal_highdma(skb->dev, skb)) {
f01a5236
JG
2650 features &= ~NETIF_F_SG;
2651 }
2652
2653 return features;
2654}
2655
e38f3025
TM
2656netdev_features_t passthru_features_check(struct sk_buff *skb,
2657 struct net_device *dev,
2658 netdev_features_t features)
2659{
2660 return features;
2661}
2662EXPORT_SYMBOL(passthru_features_check);
2663
8cb65d00
TM
2664static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2665 struct net_device *dev,
2666 netdev_features_t features)
2667{
2668 return vlan_features_check(skb, features);
2669}
2670
c1e756bf 2671netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2672{
5f35227e 2673 struct net_device *dev = skb->dev;
fcbeb976
ED
2674 netdev_features_t features = dev->features;
2675 u16 gso_segs = skb_shinfo(skb)->gso_segs;
58e998c6 2676
fcbeb976 2677 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
30b678d8
BH
2678 features &= ~NETIF_F_GSO_MASK;
2679
5f35227e
JG
2680 /* If encapsulation offload request, verify we are testing
2681 * hardware encapsulation features instead of standard
2682 * features for the netdev
2683 */
2684 if (skb->encapsulation)
2685 features &= dev->hw_enc_features;
2686
f5a7fb88
TM
2687 if (skb_vlan_tagged(skb))
2688 features = netdev_intersect_features(features,
2689 dev->vlan_features |
2690 NETIF_F_HW_VLAN_CTAG_TX |
2691 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2692
5f35227e
JG
2693 if (dev->netdev_ops->ndo_features_check)
2694 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2695 features);
8cb65d00
TM
2696 else
2697 features &= dflt_features_check(skb, dev, features);
5f35227e 2698
c1e756bf 2699 return harmonize_features(skb, features);
58e998c6 2700}
c1e756bf 2701EXPORT_SYMBOL(netif_skb_features);
58e998c6 2702
2ea25513 2703static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2704 struct netdev_queue *txq, bool more)
f6a78bfc 2705{
2ea25513
DM
2706 unsigned int len;
2707 int rc;
00829823 2708
7866a621 2709 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2710 dev_queue_xmit_nit(skb, dev);
fc741216 2711
2ea25513
DM
2712 len = skb->len;
2713 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 2714 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 2715 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 2716
2ea25513
DM
2717 return rc;
2718}
7b9c6090 2719
8dcda22a
DM
2720struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2721 struct netdev_queue *txq, int *ret)
7f2e870f
DM
2722{
2723 struct sk_buff *skb = first;
2724 int rc = NETDEV_TX_OK;
7b9c6090 2725
7f2e870f
DM
2726 while (skb) {
2727 struct sk_buff *next = skb->next;
fc70fb64 2728
7f2e870f 2729 skb->next = NULL;
95f6b3dd 2730 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
2731 if (unlikely(!dev_xmit_complete(rc))) {
2732 skb->next = next;
2733 goto out;
2734 }
6afff0ca 2735
7f2e870f
DM
2736 skb = next;
2737 if (netif_xmit_stopped(txq) && skb) {
2738 rc = NETDEV_TX_BUSY;
2739 break;
9ccb8975 2740 }
7f2e870f 2741 }
9ccb8975 2742
7f2e870f
DM
2743out:
2744 *ret = rc;
2745 return skb;
2746}
b40863c6 2747
1ff0dc94
ED
2748static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2749 netdev_features_t features)
f6a78bfc 2750{
df8a39de 2751 if (skb_vlan_tag_present(skb) &&
5968250c
JP
2752 !vlan_hw_offload_capable(features, skb->vlan_proto))
2753 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
2754 return skb;
2755}
f6a78bfc 2756
55a93b3e 2757static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
eae3f88e
DM
2758{
2759 netdev_features_t features;
f6a78bfc 2760
eae3f88e
DM
2761 if (skb->next)
2762 return skb;
068a2de5 2763
eae3f88e
DM
2764 features = netif_skb_features(skb);
2765 skb = validate_xmit_vlan(skb, features);
2766 if (unlikely(!skb))
2767 goto out_null;
7b9c6090 2768
8b86a61d 2769 if (netif_needs_gso(skb, features)) {
ce93718f
DM
2770 struct sk_buff *segs;
2771
2772 segs = skb_gso_segment(skb, features);
cecda693 2773 if (IS_ERR(segs)) {
af6dabc9 2774 goto out_kfree_skb;
cecda693
JW
2775 } else if (segs) {
2776 consume_skb(skb);
2777 skb = segs;
f6a78bfc 2778 }
eae3f88e
DM
2779 } else {
2780 if (skb_needs_linearize(skb, features) &&
2781 __skb_linearize(skb))
2782 goto out_kfree_skb;
4ec93edb 2783
eae3f88e
DM
2784 /* If packet is not checksummed and device does not
2785 * support checksumming for this protocol, complete
2786 * checksumming here.
2787 */
2788 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2789 if (skb->encapsulation)
2790 skb_set_inner_transport_header(skb,
2791 skb_checksum_start_offset(skb));
2792 else
2793 skb_set_transport_header(skb,
2794 skb_checksum_start_offset(skb));
2795 if (!(features & NETIF_F_ALL_CSUM) &&
2796 skb_checksum_help(skb))
2797 goto out_kfree_skb;
7b9c6090 2798 }
0c772159 2799 }
7b9c6090 2800
eae3f88e 2801 return skb;
fc70fb64 2802
f6a78bfc
HX
2803out_kfree_skb:
2804 kfree_skb(skb);
eae3f88e
DM
2805out_null:
2806 return NULL;
2807}
6afff0ca 2808
55a93b3e
ED
2809struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2810{
2811 struct sk_buff *next, *head = NULL, *tail;
2812
bec3cfdc 2813 for (; skb != NULL; skb = next) {
55a93b3e
ED
2814 next = skb->next;
2815 skb->next = NULL;
bec3cfdc
ED
2816
2817 /* in case skb won't be segmented, point it to itself */
2818 skb->prev = skb;
2819
55a93b3e 2820 skb = validate_xmit_skb(skb, dev);
bec3cfdc
ED
2821 if (!skb)
2822 continue;
55a93b3e 2823
bec3cfdc
ED
2824 if (!head)
2825 head = skb;
2826 else
2827 tail->next = skb;
2828 /* If skb was segmented, skb->prev points to
2829 * the last segment. If not, it still contains skb.
2830 */
2831 tail = skb->prev;
55a93b3e
ED
2832 }
2833 return head;
f6a78bfc
HX
2834}
2835
1def9238
ED
2836static void qdisc_pkt_len_init(struct sk_buff *skb)
2837{
2838 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2839
2840 qdisc_skb_cb(skb)->pkt_len = skb->len;
2841
2842 /* To get more precise estimation of bytes sent on wire,
2843 * we add to pkt_len the headers size of all segments
2844 */
2845 if (shinfo->gso_size) {
757b8b1d 2846 unsigned int hdr_len;
15e5a030 2847 u16 gso_segs = shinfo->gso_segs;
1def9238 2848
757b8b1d
ED
2849 /* mac layer + network layer */
2850 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2851
2852 /* + transport layer */
1def9238
ED
2853 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2854 hdr_len += tcp_hdrlen(skb);
2855 else
2856 hdr_len += sizeof(struct udphdr);
15e5a030
JW
2857
2858 if (shinfo->gso_type & SKB_GSO_DODGY)
2859 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2860 shinfo->gso_size);
2861
2862 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
2863 }
2864}
2865
bbd8a0d3
KK
2866static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2867 struct net_device *dev,
2868 struct netdev_queue *txq)
2869{
2870 spinlock_t *root_lock = qdisc_lock(q);
a2da570d 2871 bool contended;
bbd8a0d3
KK
2872 int rc;
2873
1def9238 2874 qdisc_pkt_len_init(skb);
a2da570d 2875 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
2876 /*
2877 * Heuristic to force contended enqueues to serialize on a
2878 * separate lock before trying to get qdisc main lock.
9bf2b8c2
YX
2879 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2880 * often and dequeue packets faster.
79640a4c 2881 */
a2da570d 2882 contended = qdisc_is_running(q);
79640a4c
ED
2883 if (unlikely(contended))
2884 spin_lock(&q->busylock);
2885
bbd8a0d3
KK
2886 spin_lock(root_lock);
2887 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2888 kfree_skb(skb);
2889 rc = NET_XMIT_DROP;
2890 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2891 qdisc_run_begin(q)) {
bbd8a0d3
KK
2892 /*
2893 * This is a work-conserving queue; there are no old skbs
2894 * waiting to be sent out; and the qdisc is not running -
2895 * xmit the skb directly.
2896 */
bfe0d029 2897
bfe0d029
ED
2898 qdisc_bstats_update(q, skb);
2899
55a93b3e 2900 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
2901 if (unlikely(contended)) {
2902 spin_unlock(&q->busylock);
2903 contended = false;
2904 }
bbd8a0d3 2905 __qdisc_run(q);
79640a4c 2906 } else
bc135b23 2907 qdisc_run_end(q);
bbd8a0d3
KK
2908
2909 rc = NET_XMIT_SUCCESS;
2910 } else {
a2da570d 2911 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
79640a4c
ED
2912 if (qdisc_run_begin(q)) {
2913 if (unlikely(contended)) {
2914 spin_unlock(&q->busylock);
2915 contended = false;
2916 }
2917 __qdisc_run(q);
2918 }
bbd8a0d3
KK
2919 }
2920 spin_unlock(root_lock);
79640a4c
ED
2921 if (unlikely(contended))
2922 spin_unlock(&q->busylock);
bbd8a0d3
KK
2923 return rc;
2924}
2925
86f8515f 2926#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
2927static void skb_update_prio(struct sk_buff *skb)
2928{
6977a79d 2929 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 2930
91c68ce2
ED
2931 if (!skb->priority && skb->sk && map) {
2932 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2933
2934 if (prioidx < map->priomap_len)
2935 skb->priority = map->priomap[prioidx];
2936 }
5bc1421e
NH
2937}
2938#else
2939#define skb_update_prio(skb)
2940#endif
2941
f60e5990 2942DEFINE_PER_CPU(int, xmit_recursion);
2943EXPORT_SYMBOL(xmit_recursion);
2944
11a766ce 2945#define RECURSION_LIMIT 10
745e20f1 2946
95603e22
MM
2947/**
2948 * dev_loopback_xmit - loop back @skb
0c4b51f0
EB
2949 * @net: network namespace this loopback is happening in
2950 * @sk: sk needed to be a netfilter okfn
95603e22
MM
2951 * @skb: buffer to transmit
2952 */
0c4b51f0 2953int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
2954{
2955 skb_reset_mac_header(skb);
2956 __skb_pull(skb, skb_network_offset(skb));
2957 skb->pkt_type = PACKET_LOOPBACK;
2958 skb->ip_summed = CHECKSUM_UNNECESSARY;
2959 WARN_ON(!skb_dst(skb));
2960 skb_dst_force(skb);
2961 netif_rx_ni(skb);
2962 return 0;
2963}
2964EXPORT_SYMBOL(dev_loopback_xmit);
2965
638b2a69
JP
2966static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2967{
2968#ifdef CONFIG_XPS
2969 struct xps_dev_maps *dev_maps;
2970 struct xps_map *map;
2971 int queue_index = -1;
2972
2973 rcu_read_lock();
2974 dev_maps = rcu_dereference(dev->xps_maps);
2975 if (dev_maps) {
2976 map = rcu_dereference(
2977 dev_maps->cpu_map[skb->sender_cpu - 1]);
2978 if (map) {
2979 if (map->len == 1)
2980 queue_index = map->queues[0];
2981 else
2982 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
2983 map->len)];
2984 if (unlikely(queue_index >= dev->real_num_tx_queues))
2985 queue_index = -1;
2986 }
2987 }
2988 rcu_read_unlock();
2989
2990 return queue_index;
2991#else
2992 return -1;
2993#endif
2994}
2995
2996static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2997{
2998 struct sock *sk = skb->sk;
2999 int queue_index = sk_tx_queue_get(sk);
3000
3001 if (queue_index < 0 || skb->ooo_okay ||
3002 queue_index >= dev->real_num_tx_queues) {
3003 int new_index = get_xps_queue(dev, skb);
3004 if (new_index < 0)
3005 new_index = skb_tx_hash(dev, skb);
3006
3007 if (queue_index != new_index && sk &&
004a5d01 3008 sk_fullsock(sk) &&
638b2a69
JP
3009 rcu_access_pointer(sk->sk_dst_cache))
3010 sk_tx_queue_set(sk, new_index);
3011
3012 queue_index = new_index;
3013 }
3014
3015 return queue_index;
3016}
3017
3018struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3019 struct sk_buff *skb,
3020 void *accel_priv)
3021{
3022 int queue_index = 0;
3023
3024#ifdef CONFIG_XPS
52bd2d62
ED
3025 u32 sender_cpu = skb->sender_cpu - 1;
3026
3027 if (sender_cpu >= (u32)NR_CPUS)
638b2a69
JP
3028 skb->sender_cpu = raw_smp_processor_id() + 1;
3029#endif
3030
3031 if (dev->real_num_tx_queues != 1) {
3032 const struct net_device_ops *ops = dev->netdev_ops;
3033 if (ops->ndo_select_queue)
3034 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3035 __netdev_pick_tx);
3036 else
3037 queue_index = __netdev_pick_tx(dev, skb);
3038
3039 if (!accel_priv)
3040 queue_index = netdev_cap_txqueue(dev, queue_index);
3041 }
3042
3043 skb_set_queue_mapping(skb, queue_index);
3044 return netdev_get_tx_queue(dev, queue_index);
3045}
3046
d29f749e 3047/**
9d08dd3d 3048 * __dev_queue_xmit - transmit a buffer
d29f749e 3049 * @skb: buffer to transmit
9d08dd3d 3050 * @accel_priv: private data used for L2 forwarding offload
d29f749e
DJ
3051 *
3052 * Queue a buffer for transmission to a network device. The caller must
3053 * have set the device and priority and built the buffer before calling
3054 * this function. The function can be called from an interrupt.
3055 *
3056 * A negative errno code is returned on a failure. A success does not
3057 * guarantee the frame will be transmitted as it may be dropped due
3058 * to congestion or traffic shaping.
3059 *
3060 * -----------------------------------------------------------------------------------
3061 * I notice this method can also return errors from the queue disciplines,
3062 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3063 * be positive.
3064 *
3065 * Regardless of the return value, the skb is consumed, so it is currently
3066 * difficult to retry a send to this method. (You can bump the ref count
3067 * before sending to hold a reference for retry if you are careful.)
3068 *
3069 * When calling this method, interrupts MUST be enabled. This is because
3070 * the BH enable code must have IRQs enabled so that it will not deadlock.
3071 * --BLG
3072 */
0a59f3a9 3073static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3074{
3075 struct net_device *dev = skb->dev;
dc2b4847 3076 struct netdev_queue *txq;
1da177e4
LT
3077 struct Qdisc *q;
3078 int rc = -ENOMEM;
3079
6d1ccff6
ED
3080 skb_reset_mac_header(skb);
3081
e7fd2885
WB
3082 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3083 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3084
4ec93edb
YH
3085 /* Disable soft irqs for various locks below. Also
3086 * stops preemption for RCU.
1da177e4 3087 */
4ec93edb 3088 rcu_read_lock_bh();
1da177e4 3089
5bc1421e
NH
3090 skb_update_prio(skb);
3091
02875878
ED
3092 /* If device/qdisc don't need skb->dst, release it right now while
3093 * it's hot in this cpu's cache.
3094 */
3095 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3096 skb_dst_drop(skb);
3097 else
3098 skb_dst_force(skb);
3099
0c4f691f
SF
3100#ifdef CONFIG_NET_SWITCHDEV
3101 /* Don't forward if offload device already forwarded */
3102 if (skb->offload_fwd_mark &&
3103 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3104 consume_skb(skb);
3105 rc = NET_XMIT_SUCCESS;
3106 goto out;
3107 }
3108#endif
3109
f663dd9a 3110 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3111 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3112
1da177e4 3113#ifdef CONFIG_NET_CLS_ACT
d1b19dff 3114 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4 3115#endif
cf66ba58 3116 trace_net_dev_queue(skb);
1da177e4 3117 if (q->enqueue) {
bbd8a0d3 3118 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3119 goto out;
1da177e4
LT
3120 }
3121
3122 /* The device has no queue. Common case for software devices:
3123 loopback, all the sorts of tunnels...
3124
932ff279
HX
3125 Really, it is unlikely that netif_tx_lock protection is necessary
3126 here. (e.g. loopback and IP tunnels are clean ignoring statistics
1da177e4
LT
3127 counters.)
3128 However, it is possible that they rely on the protection
3129 made by us here.
3130
3131 Check this and take the lock. It is not prone to deadlocks.
3132 Or use the noqueue qdisc, which is even simpler 8)
3133 */
3134 if (dev->flags & IFF_UP) {
3135 int cpu = smp_processor_id(); /* ok because BHs are off */
3136
c773e847 3137 if (txq->xmit_lock_owner != cpu) {
1da177e4 3138
745e20f1
ED
3139 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3140 goto recursion_alert;
3141
1f59533f
JDB
3142 skb = validate_xmit_skb(skb, dev);
3143 if (!skb)
3144 goto drop;
3145
c773e847 3146 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3147
73466498 3148 if (!netif_xmit_stopped(txq)) {
745e20f1 3149 __this_cpu_inc(xmit_recursion);
ce93718f 3150 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3151 __this_cpu_dec(xmit_recursion);
572a9d7b 3152 if (dev_xmit_complete(rc)) {
c773e847 3153 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3154 goto out;
3155 }
3156 }
c773e847 3157 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3158 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3159 dev->name);
1da177e4
LT
3160 } else {
3161 /* Recursion is detected! It is possible,
745e20f1
ED
3162 * unfortunately
3163 */
3164recursion_alert:
e87cc472
JP
3165 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3166 dev->name);
1da177e4
LT
3167 }
3168 }
3169
3170 rc = -ENETDOWN;
1f59533f 3171drop:
d4828d85 3172 rcu_read_unlock_bh();
1da177e4 3173
015f0688 3174 atomic_long_inc(&dev->tx_dropped);
1f59533f 3175 kfree_skb_list(skb);
1da177e4
LT
3176 return rc;
3177out:
d4828d85 3178 rcu_read_unlock_bh();
1da177e4
LT
3179 return rc;
3180}
f663dd9a 3181
2b4aa3ce 3182int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
3183{
3184 return __dev_queue_xmit(skb, NULL);
3185}
2b4aa3ce 3186EXPORT_SYMBOL(dev_queue_xmit);
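A minimal sketch of the calling convention the comment block above describes: the caller sets skb->dev (and anything else the lower layers need) before handing the skb off, and must not touch it afterwards since it is consumed either way. The protocol value is an assumption for the example.

	static int example_send(struct net_device *dev, struct sk_buff *skb)
	{
		skb->dev = dev;
		skb->protocol = htons(ETH_P_IP);	/* assumption: IPv4 payload */

		/* may return positive NET_XMIT_* codes as well as -errno */
		return dev_queue_xmit(skb);
	}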
1da177e4 3187
f663dd9a
JW
3188int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3189{
3190 return __dev_queue_xmit(skb, accel_priv);
3191}
3192EXPORT_SYMBOL(dev_queue_xmit_accel);
3193
1da177e4
LT
3194
3195/*=======================================================================
3196 Receiver routines
3197 =======================================================================*/
3198
6b2bedc3 3199int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3200EXPORT_SYMBOL(netdev_max_backlog);
3201
3b098e2d 3202int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
3203int netdev_budget __read_mostly = 300;
3204int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 3205
eecfd7c4
ED
3206/* Called with irq disabled */
3207static inline void ____napi_schedule(struct softnet_data *sd,
3208 struct napi_struct *napi)
3209{
3210 list_add_tail(&napi->poll_list, &sd->poll_list);
3211 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3212}
3213
bfb564e7
KK
3214#ifdef CONFIG_RPS
3215
3216/* One global table that all flow-based protocols share. */
6e3f7faf 3217struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3218EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3219u32 rps_cpu_mask __read_mostly;
3220EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3221
c5905afb 3222struct static_key rps_needed __read_mostly;
adc9300e 3223
c445477d
BH
3224static struct rps_dev_flow *
3225set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3226 struct rps_dev_flow *rflow, u16 next_cpu)
3227{
a31196b0 3228 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3229#ifdef CONFIG_RFS_ACCEL
3230 struct netdev_rx_queue *rxqueue;
3231 struct rps_dev_flow_table *flow_table;
3232 struct rps_dev_flow *old_rflow;
3233 u32 flow_id;
3234 u16 rxq_index;
3235 int rc;
3236
3237 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3238 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3239 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3240 goto out;
3241 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3242 if (rxq_index == skb_get_rx_queue(skb))
3243 goto out;
3244
3245 rxqueue = dev->_rx + rxq_index;
3246 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3247 if (!flow_table)
3248 goto out;
61b905da 3249 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3250 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3251 rxq_index, flow_id);
3252 if (rc < 0)
3253 goto out;
3254 old_rflow = rflow;
3255 rflow = &flow_table->flows[flow_id];
c445477d
BH
3256 rflow->filter = rc;
3257 if (old_rflow->filter == rflow->filter)
3258 old_rflow->filter = RPS_NO_FILTER;
3259 out:
3260#endif
3261 rflow->last_qtail =
09994d1b 3262 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3263 }
3264
09994d1b 3265 rflow->cpu = next_cpu;
c445477d
BH
3266 return rflow;
3267}
3268
bfb564e7
KK
3269/*
3270 * get_rps_cpu is called from netif_receive_skb and returns the target
3271 * CPU from the RPS map of the receiving queue for a given skb.
3272 * rcu_read_lock must be held on entry.
3273 */
3274static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3275 struct rps_dev_flow **rflowp)
3276{
567e4b79
ED
3277 const struct rps_sock_flow_table *sock_flow_table;
3278 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3279 struct rps_dev_flow_table *flow_table;
567e4b79 3280 struct rps_map *map;
bfb564e7 3281 int cpu = -1;
567e4b79 3282 u32 tcpu;
61b905da 3283 u32 hash;
bfb564e7
KK
3284
3285 if (skb_rx_queue_recorded(skb)) {
3286 u16 index = skb_get_rx_queue(skb);
567e4b79 3287
62fe0b40
BH
3288 if (unlikely(index >= dev->real_num_rx_queues)) {
3289 WARN_ONCE(dev->real_num_rx_queues > 1,
3290 "%s received packet on queue %u, but number "
3291 "of RX queues is %u\n",
3292 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3293 goto done;
3294 }
567e4b79
ED
3295 rxqueue += index;
3296 }
bfb564e7 3297
567e4b79
ED
3298 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3299
3300 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3301 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3302 if (!flow_table && !map)
bfb564e7
KK
3303 goto done;
3304
2d47b459 3305 skb_reset_network_header(skb);
61b905da
TH
3306 hash = skb_get_hash(skb);
3307 if (!hash)
bfb564e7
KK
3308 goto done;
3309
fec5e652
TH
3310 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3311 if (flow_table && sock_flow_table) {
fec5e652 3312 struct rps_dev_flow *rflow;
567e4b79
ED
3313 u32 next_cpu;
3314 u32 ident;
3315
3316 /* First check the global flow table for a match */
3317 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3318 if ((ident ^ hash) & ~rps_cpu_mask)
3319 goto try_rps;
fec5e652 3320
567e4b79
ED
3321 next_cpu = ident & rps_cpu_mask;
3322
3323 /* OK, now we know there is a match,
3324 * we can look at the local (per receive queue) flow table
3325 */
61b905da 3326 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3327 tcpu = rflow->cpu;
3328
fec5e652
TH
3329 /*
3330 * If the desired CPU (where last recvmsg was done) is
3331 * different from current CPU (one in the rx-queue flow
3332 * table entry), switch if one of the following holds:
a31196b0 3333 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3334 * - Current CPU is offline.
3335 * - The current CPU's queue tail has advanced beyond the
3336 * last packet that was enqueued using this table entry.
3337 * This guarantees that all previous packets for the flow
3338 * have been dequeued, thus preserving in order delivery.
3339 */
3340 if (unlikely(tcpu != next_cpu) &&
a31196b0 3341 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3342 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3343 rflow->last_qtail)) >= 0)) {
3344 tcpu = next_cpu;
c445477d 3345 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3346 }
c445477d 3347
a31196b0 3348 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3349 *rflowp = rflow;
3350 cpu = tcpu;
3351 goto done;
3352 }
3353 }
3354
567e4b79
ED
3355try_rps:
3356
0a9627f2 3357 if (map) {
8fc54f68 3358 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3359 if (cpu_online(tcpu)) {
3360 cpu = tcpu;
3361 goto done;
3362 }
3363 }
3364
3365done:
0a9627f2
TH
3366 return cpu;
3367}
3368
c445477d
BH
3369#ifdef CONFIG_RFS_ACCEL
3370
3371/**
3372 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3373 * @dev: Device on which the filter was set
3374 * @rxq_index: RX queue index
3375 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3376 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3377 *
3378 * Drivers that implement ndo_rx_flow_steer() should periodically call
3379 * this function for each installed filter and remove the filters for
3380 * which it returns %true.
3381 */
3382bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3383 u32 flow_id, u16 filter_id)
3384{
3385 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3386 struct rps_dev_flow_table *flow_table;
3387 struct rps_dev_flow *rflow;
3388 bool expire = true;
a31196b0 3389 unsigned int cpu;
c445477d
BH
3390
3391 rcu_read_lock();
3392 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3393 if (flow_table && flow_id <= flow_table->mask) {
3394 rflow = &flow_table->flows[flow_id];
3395 cpu = ACCESS_ONCE(rflow->cpu);
a31196b0 3396 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3397 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3398 rflow->last_qtail) <
3399 (int)(10 * flow_table->mask)))
3400 expire = false;
3401 }
3402 rcu_read_unlock();
3403 return expire;
3404}
3405EXPORT_SYMBOL(rps_may_expire_flow);
3406
3407#endif /* CONFIG_RFS_ACCEL */
3408
0a9627f2 3409/* Called from hardirq (IPI) context */
e36fa2f7 3410static void rps_trigger_softirq(void *data)
0a9627f2 3411{
e36fa2f7
ED
3412 struct softnet_data *sd = data;
3413
eecfd7c4 3414 ____napi_schedule(sd, &sd->backlog);
dee42870 3415 sd->received_rps++;
0a9627f2 3416}
e36fa2f7 3417
fec5e652 3418#endif /* CONFIG_RPS */
0a9627f2 3419
e36fa2f7
ED
3420/*
3421 * Check if this softnet_data structure belongs to another cpu.
3422 * If yes, queue it to our IPI list and return 1.
3423 * If no, return 0.
3424 */
3425static int rps_ipi_queued(struct softnet_data *sd)
3426{
3427#ifdef CONFIG_RPS
903ceff7 3428 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3429
3430 if (sd != mysd) {
3431 sd->rps_ipi_next = mysd->rps_ipi_list;
3432 mysd->rps_ipi_list = sd;
3433
3434 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3435 return 1;
3436 }
3437#endif /* CONFIG_RPS */
3438 return 0;
3439}
3440
99bbc707
WB
3441#ifdef CONFIG_NET_FLOW_LIMIT
3442int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3443#endif
3444
3445static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3446{
3447#ifdef CONFIG_NET_FLOW_LIMIT
3448 struct sd_flow_limit *fl;
3449 struct softnet_data *sd;
3450 unsigned int old_flow, new_flow;
3451
3452 if (qlen < (netdev_max_backlog >> 1))
3453 return false;
3454
903ceff7 3455 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3456
3457 rcu_read_lock();
3458 fl = rcu_dereference(sd->flow_limit);
3459 if (fl) {
3958afa1 3460 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3461 old_flow = fl->history[fl->history_head];
3462 fl->history[fl->history_head] = new_flow;
3463
3464 fl->history_head++;
3465 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3466
3467 if (likely(fl->buckets[old_flow]))
3468 fl->buckets[old_flow]--;
3469
3470 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3471 fl->count++;
3472 rcu_read_unlock();
3473 return true;
3474 }
3475 }
3476 rcu_read_unlock();
3477#endif
3478 return false;
3479}
3480
0a9627f2
TH
3481/*
3482 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3483 * queue (may be a remote CPU queue).
3484 */
fec5e652
TH
3485static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3486 unsigned int *qtail)
0a9627f2 3487{
e36fa2f7 3488 struct softnet_data *sd;
0a9627f2 3489 unsigned long flags;
99bbc707 3490 unsigned int qlen;
0a9627f2 3491
e36fa2f7 3492 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3493
3494 local_irq_save(flags);
0a9627f2 3495
e36fa2f7 3496 rps_lock(sd);
e9e4dd32
JA
3497 if (!netif_running(skb->dev))
3498 goto drop;
99bbc707
WB
3499 qlen = skb_queue_len(&sd->input_pkt_queue);
3500 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3501 if (qlen) {
0a9627f2 3502enqueue:
e36fa2f7 3503 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3504 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3505 rps_unlock(sd);
152102c7 3506 local_irq_restore(flags);
0a9627f2
TH
3507 return NET_RX_SUCCESS;
3508 }
3509
ebda37c2
ED
3510 /* Schedule NAPI for the backlog device.
3511 * We can use a non-atomic operation since we own the queue lock.
3512 */
3513 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3514 if (!rps_ipi_queued(sd))
eecfd7c4 3515 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3516 }
3517 goto enqueue;
3518 }
3519
e9e4dd32 3520drop:
dee42870 3521 sd->dropped++;
e36fa2f7 3522 rps_unlock(sd);
0a9627f2 3523
0a9627f2
TH
3524 local_irq_restore(flags);
3525
caf586e5 3526 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3527 kfree_skb(skb);
3528 return NET_RX_DROP;
3529}
1da177e4 3530
ae78dbfa 3531static int netif_rx_internal(struct sk_buff *skb)
1da177e4 3532{
b0e28f1e 3533 int ret;
1da177e4 3534
588f0330 3535 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 3536
cf66ba58 3537 trace_netif_rx(skb);
df334545 3538#ifdef CONFIG_RPS
c5905afb 3539 if (static_key_false(&rps_needed)) {
fec5e652 3540 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
3541 int cpu;
3542
cece1945 3543 preempt_disable();
b0e28f1e 3544 rcu_read_lock();
fec5e652
TH
3545
3546 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
3547 if (cpu < 0)
3548 cpu = smp_processor_id();
fec5e652
TH
3549
3550 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3551
b0e28f1e 3552 rcu_read_unlock();
cece1945 3553 preempt_enable();
adc9300e
ED
3554 } else
3555#endif
fec5e652
TH
3556 {
3557 unsigned int qtail;
3558 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3559 put_cpu();
3560 }
b0e28f1e 3561 return ret;
1da177e4 3562}
ae78dbfa
BH
3563
3564/**
3565 * netif_rx - post buffer to the network code
3566 * @skb: buffer to post
3567 *
3568 * This function receives a packet from a device driver and queues it for
3569 * the upper (protocol) levels to process. It always succeeds. The buffer
3570 * may be dropped during processing for congestion control or by the
3571 * protocol layers.
3572 *
3573 * return values:
3574 * NET_RX_SUCCESS (no congestion)
3575 * NET_RX_DROP (packet was dropped)
3576 *
3577 */
3578
3579int netif_rx(struct sk_buff *skb)
3580{
3581 trace_netif_rx_entry(skb);
3582
3583 return netif_rx_internal(skb);
3584}
d1b19dff 3585EXPORT_SYMBOL(netif_rx);
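
A minimal sketch (not from this file) of the classic non-NAPI receive path that ends in netif_rx(): the driver copies the frame into a freshly allocated skb, sets the protocol with eth_type_trans() and queues it for the stack. The helper name is made up.

/* Hypothetical interrupt-time RX handler of a legacy (non-NAPI) driver. */
static void my_rx_frame(struct net_device *dev, const void *data,
			unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	netif_rx(skb);		/* may still be dropped under congestion */
}
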
1da177e4
LT
3586
3587int netif_rx_ni(struct sk_buff *skb)
3588{
3589 int err;
3590
ae78dbfa
BH
3591 trace_netif_rx_ni_entry(skb);
3592
1da177e4 3593 preempt_disable();
ae78dbfa 3594 err = netif_rx_internal(skb);
1da177e4
LT
3595 if (local_softirq_pending())
3596 do_softirq();
3597 preempt_enable();
3598
3599 return err;
3600}
1da177e4
LT
3601EXPORT_SYMBOL(netif_rx_ni);
3602
1da177e4
LT
3603static void net_tx_action(struct softirq_action *h)
3604{
903ceff7 3605 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
3606
3607 if (sd->completion_queue) {
3608 struct sk_buff *clist;
3609
3610 local_irq_disable();
3611 clist = sd->completion_queue;
3612 sd->completion_queue = NULL;
3613 local_irq_enable();
3614
3615 while (clist) {
3616 struct sk_buff *skb = clist;
3617 clist = clist->next;
3618
547b792c 3619 WARN_ON(atomic_read(&skb->users));
e6247027
ED
3620 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3621 trace_consume_skb(skb);
3622 else
3623 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
3624 __kfree_skb(skb);
3625 }
3626 }
3627
3628 if (sd->output_queue) {
37437bb2 3629 struct Qdisc *head;
1da177e4
LT
3630
3631 local_irq_disable();
3632 head = sd->output_queue;
3633 sd->output_queue = NULL;
a9cbd588 3634 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
3635 local_irq_enable();
3636
3637 while (head) {
37437bb2
DM
3638 struct Qdisc *q = head;
3639 spinlock_t *root_lock;
3640
1da177e4
LT
3641 head = head->next_sched;
3642
5fb66229 3643 root_lock = qdisc_lock(q);
37437bb2 3644 if (spin_trylock(root_lock)) {
4e857c58 3645 smp_mb__before_atomic();
def82a1d
JP
3646 clear_bit(__QDISC_STATE_SCHED,
3647 &q->state);
37437bb2
DM
3648 qdisc_run(q);
3649 spin_unlock(root_lock);
1da177e4 3650 } else {
195648bb 3651 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3652 &q->state)) {
195648bb 3653 __netif_reschedule(q);
e8a83e10 3654 } else {
4e857c58 3655 smp_mb__before_atomic();
e8a83e10
JP
3656 clear_bit(__QDISC_STATE_SCHED,
3657 &q->state);
3658 }
1da177e4
LT
3659 }
3660 }
3661 }
3662}
3663
ab95bfe0
JP
3664#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3665 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
3666/* This hook is defined here for ATM LANE */
3667int (*br_fdb_test_addr_hook)(struct net_device *dev,
3668 unsigned char *addr) __read_mostly;
4fb019a0 3669EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3670#endif
1da177e4 3671
f697c3e8
HX
3672static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3673 struct packet_type **pt_prev,
3674 int *ret, struct net_device *orig_dev)
3675{
e7582bab 3676#ifdef CONFIG_NET_CLS_ACT
d2788d34
DB
3677 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3678 struct tcf_result cl_res;
24824a09 3679
c9e99fd0
DB
3680 /* If there's at least one ingress present somewhere (so
3681 * we get here via enabled static key), remaining devices
3682 * that are not configured with an ingress qdisc will bail
d2788d34 3683 * out here.
c9e99fd0 3684 */
d2788d34 3685 if (!cl)
4577139b 3686 return skb;
f697c3e8
HX
3687 if (*pt_prev) {
3688 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3689 *pt_prev = NULL;
1da177e4
LT
3690 }
3691
3365495c 3692 qdisc_skb_cb(skb)->pkt_len = skb->len;
c9e99fd0 3693 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
24ea591d 3694 qdisc_bstats_cpu_update(cl->q, skb);
c9e99fd0 3695
3b3ae880 3696 switch (tc_classify(skb, cl, &cl_res, false)) {
d2788d34
DB
3697 case TC_ACT_OK:
3698 case TC_ACT_RECLASSIFY:
3699 skb->tc_index = TC_H_MIN(cl_res.classid);
3700 break;
3701 case TC_ACT_SHOT:
24ea591d 3702 qdisc_qstats_cpu_drop(cl->q);
d2788d34
DB
3703 case TC_ACT_STOLEN:
3704 case TC_ACT_QUEUED:
3705 kfree_skb(skb);
3706 return NULL;
27b29f63
AS
3707 case TC_ACT_REDIRECT:
3708 /* skb_mac_header check was done by cls/act_bpf, so
3709 * we can safely push the L2 header back before
3710 * redirecting to another netdev
3711 */
3712 __skb_push(skb, skb->mac_len);
3713 skb_do_redirect(skb);
3714 return NULL;
d2788d34
DB
3715 default:
3716 break;
f697c3e8 3717 }
e7582bab 3718#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
3719 return skb;
3720}
1da177e4 3721
ab95bfe0
JP
3722/**
3723 * netdev_rx_handler_register - register receive handler
3724 * @dev: device to register a handler for
3725 * @rx_handler: receive handler to register
93e2c32b 3726 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 3727 *
e227867f 3728 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
3729 * called from __netif_receive_skb. A negative errno code is returned
3730 * on a failure.
3731 *
3732 * The caller must hold the rtnl_mutex.
8a4eb573
JP
3733 *
3734 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
3735 */
3736int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
3737 rx_handler_func_t *rx_handler,
3738 void *rx_handler_data)
ab95bfe0
JP
3739{
3740 ASSERT_RTNL();
3741
3742 if (dev->rx_handler)
3743 return -EBUSY;
3744
00cfec37 3745 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 3746 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
3747 rcu_assign_pointer(dev->rx_handler, rx_handler);
3748
3749 return 0;
3750}
3751EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
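
A hedged sketch of how a bridge/bonding-style virtual device could attach itself to a lower device with netdev_rx_handler_register(); every my_-prefixed name is invented, and real handlers also deal with shared skbs, link-local frames and so on.

struct my_port {
	struct net_device *upper_dev;
};

static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* Hand the packet to the upper device and rerun the receive path. */
	skb->dev = port->upper_dev;
	return RX_HANDLER_ANOTHER;
}

static int my_attach_port(struct net_device *upper, struct net_device *lower)
{
	struct my_port *port;
	int err;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	port->upper_dev = upper;

	ASSERT_RTNL();		/* rtnl_mutex must already be held */
	err = netdev_rx_handler_register(lower, my_handle_frame, port);
	if (err)
		kfree(port);
	return err;
}
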
3752
3753/**
3754 * netdev_rx_handler_unregister - unregister receive handler
3755 * @dev: device to unregister a handler from
3756 *
166ec369 3757 * Unregister a receive handler from a device.
ab95bfe0
JP
3758 *
3759 * The caller must hold the rtnl_mutex.
3760 */
3761void netdev_rx_handler_unregister(struct net_device *dev)
3762{
3763
3764 ASSERT_RTNL();
a9b3cd7f 3765 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
3766 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3767 * section is guaranteed to see a non-NULL rx_handler_data
3768 * as well.
3769 */
3770 synchronize_net();
a9b3cd7f 3771 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
3772}
3773EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3774
b4b9e355
MG
3775/*
3776 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3777 * the special handling of PFMEMALLOC skbs.
3778 */
3779static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3780{
3781 switch (skb->protocol) {
2b8837ae
JP
3782 case htons(ETH_P_ARP):
3783 case htons(ETH_P_IP):
3784 case htons(ETH_P_IPV6):
3785 case htons(ETH_P_8021Q):
3786 case htons(ETH_P_8021AD):
b4b9e355
MG
3787 return true;
3788 default:
3789 return false;
3790 }
3791}
3792
e687ad60
PN
3793static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3794 int *ret, struct net_device *orig_dev)
3795{
e7582bab 3796#ifdef CONFIG_NETFILTER_INGRESS
e687ad60
PN
3797 if (nf_hook_ingress_active(skb)) {
3798 if (*pt_prev) {
3799 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3800 *pt_prev = NULL;
3801 }
3802
3803 return nf_hook_ingress(skb);
3804 }
e7582bab 3805#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
3806 return 0;
3807}
e687ad60 3808
9754e293 3809static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
3810{
3811 struct packet_type *ptype, *pt_prev;
ab95bfe0 3812 rx_handler_func_t *rx_handler;
f2ccd8fa 3813 struct net_device *orig_dev;
8a4eb573 3814 bool deliver_exact = false;
1da177e4 3815 int ret = NET_RX_DROP;
252e3346 3816 __be16 type;
1da177e4 3817
588f0330 3818 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 3819
cf66ba58 3820 trace_netif_receive_skb(skb);
9b22ea56 3821
cc9bd5ce 3822 orig_dev = skb->dev;
8f903c70 3823
c1d2bbe1 3824 skb_reset_network_header(skb);
fda55eca
ED
3825 if (!skb_transport_header_was_set(skb))
3826 skb_reset_transport_header(skb);
0b5c9db1 3827 skb_reset_mac_len(skb);
1da177e4
LT
3828
3829 pt_prev = NULL;
3830
63d8ea7f 3831another_round:
b6858177 3832 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
3833
3834 __this_cpu_inc(softnet_data.processed);
3835
8ad227ff
PM
3836 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3837 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 3838 skb = skb_vlan_untag(skb);
bcc6d479 3839 if (unlikely(!skb))
2c17d27c 3840 goto out;
bcc6d479
JP
3841 }
3842
1da177e4
LT
3843#ifdef CONFIG_NET_CLS_ACT
3844 if (skb->tc_verd & TC_NCLS) {
3845 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3846 goto ncls;
3847 }
3848#endif
3849
9754e293 3850 if (pfmemalloc)
b4b9e355
MG
3851 goto skip_taps;
3852
1da177e4 3853 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
3854 if (pt_prev)
3855 ret = deliver_skb(skb, pt_prev, orig_dev);
3856 pt_prev = ptype;
3857 }
3858
3859 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3860 if (pt_prev)
3861 ret = deliver_skb(skb, pt_prev, orig_dev);
3862 pt_prev = ptype;
1da177e4
LT
3863 }
3864
b4b9e355 3865skip_taps:
1cf51900 3866#ifdef CONFIG_NET_INGRESS
4577139b
DB
3867 if (static_key_false(&ingress_needed)) {
3868 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3869 if (!skb)
2c17d27c 3870 goto out;
e687ad60
PN
3871
3872 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 3873 goto out;
4577139b 3874 }
1cf51900
PN
3875#endif
3876#ifdef CONFIG_NET_CLS_ACT
4577139b 3877 skb->tc_verd = 0;
1da177e4
LT
3878ncls:
3879#endif
9754e293 3880 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
3881 goto drop;
3882
df8a39de 3883 if (skb_vlan_tag_present(skb)) {
2425717b
JF
3884 if (pt_prev) {
3885 ret = deliver_skb(skb, pt_prev, orig_dev);
3886 pt_prev = NULL;
3887 }
48cc32d3 3888 if (vlan_do_receive(&skb))
2425717b
JF
3889 goto another_round;
3890 else if (unlikely(!skb))
2c17d27c 3891 goto out;
2425717b
JF
3892 }
3893
48cc32d3 3894 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
3895 if (rx_handler) {
3896 if (pt_prev) {
3897 ret = deliver_skb(skb, pt_prev, orig_dev);
3898 pt_prev = NULL;
3899 }
8a4eb573
JP
3900 switch (rx_handler(&skb)) {
3901 case RX_HANDLER_CONSUMED:
3bc1b1ad 3902 ret = NET_RX_SUCCESS;
2c17d27c 3903 goto out;
8a4eb573 3904 case RX_HANDLER_ANOTHER:
63d8ea7f 3905 goto another_round;
8a4eb573
JP
3906 case RX_HANDLER_EXACT:
3907 deliver_exact = true;
3908 case RX_HANDLER_PASS:
3909 break;
3910 default:
3911 BUG();
3912 }
ab95bfe0 3913 }
1da177e4 3914
df8a39de
JP
3915 if (unlikely(skb_vlan_tag_present(skb))) {
3916 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
3917 skb->pkt_type = PACKET_OTHERHOST;
3918 /* Note: we might in the future use prio bits
3919 * and set skb->priority like in vlan_do_receive()
3920 * For the time being, just ignore Priority Code Point
3921 */
3922 skb->vlan_tci = 0;
3923 }
48cc32d3 3924
7866a621
SN
3925 type = skb->protocol;
3926
63d8ea7f 3927 /* deliver only exact match when indicated */
7866a621
SN
3928 if (likely(!deliver_exact)) {
3929 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3930 &ptype_base[ntohs(type) &
3931 PTYPE_HASH_MASK]);
3932 }
1f3c8804 3933
7866a621
SN
3934 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3935 &orig_dev->ptype_specific);
3936
3937 if (unlikely(skb->dev != orig_dev)) {
3938 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3939 &skb->dev->ptype_specific);
1da177e4
LT
3940 }
3941
3942 if (pt_prev) {
1080e512 3943 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
0e698bf6 3944 goto drop;
1080e512
MT
3945 else
3946 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3947 } else {
b4b9e355 3948drop:
caf586e5 3949 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
3950 kfree_skb(skb);
3951 /* Jamal, now you will not be able to escape explaining
3952 * to me how you were going to use this. :-)
3953 */
3954 ret = NET_RX_DROP;
3955 }
3956
2c17d27c 3957out:
9754e293
DM
3958 return ret;
3959}
3960
3961static int __netif_receive_skb(struct sk_buff *skb)
3962{
3963 int ret;
3964
3965 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3966 unsigned long pflags = current->flags;
3967
3968 /*
3969 * PFMEMALLOC skbs are special, they should
3970 * - be delivered to SOCK_MEMALLOC sockets only
3971 * - stay away from userspace
3972 * - have bounded memory usage
3973 *
3974 * Use PF_MEMALLOC as this saves us from propagating the allocation
3975 * context down to all allocation sites.
3976 */
3977 current->flags |= PF_MEMALLOC;
3978 ret = __netif_receive_skb_core(skb, true);
3979 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3980 } else
3981 ret = __netif_receive_skb_core(skb, false);
3982
1da177e4
LT
3983 return ret;
3984}
0a9627f2 3985
ae78dbfa 3986static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 3987{
2c17d27c
JA
3988 int ret;
3989
588f0330 3990 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 3991
c1f19b51
RC
3992 if (skb_defer_rx_timestamp(skb))
3993 return NET_RX_SUCCESS;
3994
2c17d27c
JA
3995 rcu_read_lock();
3996
df334545 3997#ifdef CONFIG_RPS
c5905afb 3998 if (static_key_false(&rps_needed)) {
3b098e2d 3999 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 4000 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 4001
3b098e2d
ED
4002 if (cpu >= 0) {
4003 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4004 rcu_read_unlock();
adc9300e 4005 return ret;
3b098e2d 4006 }
fec5e652 4007 }
1e94d72f 4008#endif
2c17d27c
JA
4009 ret = __netif_receive_skb(skb);
4010 rcu_read_unlock();
4011 return ret;
0a9627f2 4012}
ae78dbfa
BH
4013
4014/**
4015 * netif_receive_skb - process receive buffer from network
4016 * @skb: buffer to process
4017 *
4018 * netif_receive_skb() is the main receive data processing function.
4019 * It always succeeds. The buffer may be dropped during processing
4020 * for congestion control or by the protocol layers.
4021 *
4022 * This function may only be called from softirq context and interrupts
4023 * should be enabled.
4024 *
4025 * Return values (usually ignored):
4026 * NET_RX_SUCCESS: no congestion
4027 * NET_RX_DROP: packet was dropped
4028 */
04eb4489 4029int netif_receive_skb(struct sk_buff *skb)
ae78dbfa
BH
4030{
4031 trace_netif_receive_skb_entry(skb);
4032
4033 return netif_receive_skb_internal(skb);
4034}
04eb4489 4035EXPORT_SYMBOL(netif_receive_skb);
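
Sketch of the softirq-context counterpart to netif_rx(): a NAPI driver that does not use GRO delivers each completed frame with netif_receive_skb() from its ->poll() routine (helper name assumed).

/* Called from a hypothetical ->poll() for every completed descriptor. */
static void my_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	skb_record_rx_queue(skb, 0);	/* single-queue device assumed */
	netif_receive_skb(skb);		/* softirq context, irqs enabled */
}
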
1da177e4 4036
88751275
ED
4037/* Network device is going away, flush any packets still pending
4038 * Called with irqs disabled.
4039 */
152102c7 4040static void flush_backlog(void *arg)
6e583ce5 4041{
152102c7 4042 struct net_device *dev = arg;
903ceff7 4043 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6e583ce5
SH
4044 struct sk_buff *skb, *tmp;
4045
e36fa2f7 4046 rps_lock(sd);
6e7676c1 4047 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 4048 if (skb->dev == dev) {
e36fa2f7 4049 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4050 kfree_skb(skb);
76cc8b13 4051 input_queue_head_incr(sd);
6e583ce5 4052 }
6e7676c1 4053 }
e36fa2f7 4054 rps_unlock(sd);
6e7676c1
CG
4055
4056 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4057 if (skb->dev == dev) {
4058 __skb_unlink(skb, &sd->process_queue);
4059 kfree_skb(skb);
76cc8b13 4060 input_queue_head_incr(sd);
6e7676c1
CG
4061 }
4062 }
6e583ce5
SH
4063}
4064
d565b0a1
HX
4065static int napi_gro_complete(struct sk_buff *skb)
4066{
22061d80 4067 struct packet_offload *ptype;
d565b0a1 4068 __be16 type = skb->protocol;
22061d80 4069 struct list_head *head = &offload_base;
d565b0a1
HX
4070 int err = -ENOENT;
4071
c3c7c254
ED
4072 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4073
fc59f9a3
HX
4074 if (NAPI_GRO_CB(skb)->count == 1) {
4075 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4076 goto out;
fc59f9a3 4077 }
d565b0a1
HX
4078
4079 rcu_read_lock();
4080 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4081 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
4082 continue;
4083
299603e8 4084 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
4085 break;
4086 }
4087 rcu_read_unlock();
4088
4089 if (err) {
4090 WARN_ON(&ptype->list == head);
4091 kfree_skb(skb);
4092 return NET_RX_SUCCESS;
4093 }
4094
4095out:
ae78dbfa 4096 return netif_receive_skb_internal(skb);
d565b0a1
HX
4097}
4098
2e71a6f8
ED
4099/* napi->gro_list contains packets ordered by age.
4100 * The youngest packets are at the head of it.
4101 * Complete skbs in reverse order to reduce latencies.
4102 */
4103void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4104{
2e71a6f8 4105 struct sk_buff *skb, *prev = NULL;
d565b0a1 4106
2e71a6f8
ED
4107 /* scan list and build reverse chain */
4108 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4109 skb->prev = prev;
4110 prev = skb;
4111 }
4112
4113 for (skb = prev; skb; skb = prev) {
d565b0a1 4114 skb->next = NULL;
2e71a6f8
ED
4115
4116 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4117 return;
4118
4119 prev = skb->prev;
d565b0a1 4120 napi_gro_complete(skb);
2e71a6f8 4121 napi->gro_count--;
d565b0a1
HX
4122 }
4123
4124 napi->gro_list = NULL;
4125}
86cac58b 4126EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 4127
89c5fa33
ED
4128static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4129{
4130 struct sk_buff *p;
4131 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4132 u32 hash = skb_get_hash_raw(skb);
89c5fa33
ED
4133
4134 for (p = napi->gro_list; p; p = p->next) {
4135 unsigned long diffs;
4136
0b4cec8c
TH
4137 NAPI_GRO_CB(p)->flush = 0;
4138
4139 if (hash != skb_get_hash_raw(p)) {
4140 NAPI_GRO_CB(p)->same_flow = 0;
4141 continue;
4142 }
4143
89c5fa33
ED
4144 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4145 diffs |= p->vlan_tci ^ skb->vlan_tci;
4146 if (maclen == ETH_HLEN)
4147 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4148 skb_mac_header(skb));
89c5fa33
ED
4149 else if (!diffs)
4150 diffs = memcmp(skb_mac_header(p),
a50e233c 4151 skb_mac_header(skb),
89c5fa33
ED
4152 maclen);
4153 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33
ED
4154 }
4155}
4156
299603e8
JC
4157static void skb_gro_reset_offset(struct sk_buff *skb)
4158{
4159 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4160 const skb_frag_t *frag0 = &pinfo->frags[0];
4161
4162 NAPI_GRO_CB(skb)->data_offset = 0;
4163 NAPI_GRO_CB(skb)->frag0 = NULL;
4164 NAPI_GRO_CB(skb)->frag0_len = 0;
4165
4166 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4167 pinfo->nr_frags &&
4168 !PageHighMem(skb_frag_page(frag0))) {
4169 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4170 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
89c5fa33
ED
4171 }
4172}
4173
a50e233c
ED
4174static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4175{
4176 struct skb_shared_info *pinfo = skb_shinfo(skb);
4177
4178 BUG_ON(skb->end - skb->tail < grow);
4179
4180 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4181
4182 skb->data_len -= grow;
4183 skb->tail += grow;
4184
4185 pinfo->frags[0].page_offset += grow;
4186 skb_frag_size_sub(&pinfo->frags[0], grow);
4187
4188 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4189 skb_frag_unref(skb, 0);
4190 memmove(pinfo->frags, pinfo->frags + 1,
4191 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4192 }
4193}
4194
bb728820 4195static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
4196{
4197 struct sk_buff **pp = NULL;
22061d80 4198 struct packet_offload *ptype;
d565b0a1 4199 __be16 type = skb->protocol;
22061d80 4200 struct list_head *head = &offload_base;
0da2afd5 4201 int same_flow;
5b252f0c 4202 enum gro_result ret;
a50e233c 4203 int grow;
d565b0a1 4204
9c62a68d 4205 if (!(skb->dev->features & NETIF_F_GRO))
d565b0a1
HX
4206 goto normal;
4207
5a212329 4208 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
f17f5c91
HX
4209 goto normal;
4210
89c5fa33
ED
4211 gro_list_prepare(napi, skb);
4212
d565b0a1
HX
4213 rcu_read_lock();
4214 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4215 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
4216 continue;
4217
86911732 4218 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4219 skb_reset_mac_len(skb);
d565b0a1
HX
4220 NAPI_GRO_CB(skb)->same_flow = 0;
4221 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 4222 NAPI_GRO_CB(skb)->free = 0;
b582ef09 4223 NAPI_GRO_CB(skb)->udp_mark = 0;
15e2396d 4224 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4225
662880f4
TH
4226 /* Setup for GRO checksum validation */
4227 switch (skb->ip_summed) {
4228 case CHECKSUM_COMPLETE:
4229 NAPI_GRO_CB(skb)->csum = skb->csum;
4230 NAPI_GRO_CB(skb)->csum_valid = 1;
4231 NAPI_GRO_CB(skb)->csum_cnt = 0;
4232 break;
4233 case CHECKSUM_UNNECESSARY:
4234 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4235 NAPI_GRO_CB(skb)->csum_valid = 0;
4236 break;
4237 default:
4238 NAPI_GRO_CB(skb)->csum_cnt = 0;
4239 NAPI_GRO_CB(skb)->csum_valid = 0;
4240 }
d565b0a1 4241
f191a1d1 4242 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
4243 break;
4244 }
4245 rcu_read_unlock();
4246
4247 if (&ptype->list == head)
4248 goto normal;
4249
0da2afd5 4250 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4251 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4252
d565b0a1
HX
4253 if (pp) {
4254 struct sk_buff *nskb = *pp;
4255
4256 *pp = nskb->next;
4257 nskb->next = NULL;
4258 napi_gro_complete(nskb);
4ae5544f 4259 napi->gro_count--;
d565b0a1
HX
4260 }
4261
0da2afd5 4262 if (same_flow)
d565b0a1
HX
4263 goto ok;
4264
600adc18 4265 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4266 goto normal;
d565b0a1 4267
600adc18
ED
4268 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4269 struct sk_buff *nskb = napi->gro_list;
4270
4271 /* locate the end of the list to select the 'oldest' flow */
4272 while (nskb->next) {
4273 pp = &nskb->next;
4274 nskb = *pp;
4275 }
4276 *pp = NULL;
4277 nskb->next = NULL;
4278 napi_gro_complete(nskb);
4279 } else {
4280 napi->gro_count++;
4281 }
d565b0a1 4282 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4283 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4284 NAPI_GRO_CB(skb)->last = skb;
86911732 4285 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
4286 skb->next = napi->gro_list;
4287 napi->gro_list = skb;
5d0d9be8 4288 ret = GRO_HELD;
d565b0a1 4289
ad0f9904 4290pull:
a50e233c
ED
4291 grow = skb_gro_offset(skb) - skb_headlen(skb);
4292 if (grow > 0)
4293 gro_pull_from_frag0(skb, grow);
d565b0a1 4294ok:
5d0d9be8 4295 return ret;
d565b0a1
HX
4296
4297normal:
ad0f9904
HX
4298 ret = GRO_NORMAL;
4299 goto pull;
5d38a079 4300}
96e93eab 4301
bf5a755f
JC
4302struct packet_offload *gro_find_receive_by_type(__be16 type)
4303{
4304 struct list_head *offload_head = &offload_base;
4305 struct packet_offload *ptype;
4306
4307 list_for_each_entry_rcu(ptype, offload_head, list) {
4308 if (ptype->type != type || !ptype->callbacks.gro_receive)
4309 continue;
4310 return ptype;
4311 }
4312 return NULL;
4313}
e27a2f83 4314EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
4315
4316struct packet_offload *gro_find_complete_by_type(__be16 type)
4317{
4318 struct list_head *offload_head = &offload_base;
4319 struct packet_offload *ptype;
4320
4321 list_for_each_entry_rcu(ptype, offload_head, list) {
4322 if (ptype->type != type || !ptype->callbacks.gro_complete)
4323 continue;
4324 return ptype;
4325 }
4326 return NULL;
4327}
e27a2f83 4328EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 4329
bb728820 4330static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4331{
5d0d9be8
HX
4332 switch (ret) {
4333 case GRO_NORMAL:
ae78dbfa 4334 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
4335 ret = GRO_DROP;
4336 break;
5d38a079 4337
5d0d9be8 4338 case GRO_DROP:
5d38a079
HX
4339 kfree_skb(skb);
4340 break;
5b252f0c 4341
daa86548 4342 case GRO_MERGED_FREE:
d7e8883c
ED
4343 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4344 kmem_cache_free(skbuff_head_cache, skb);
4345 else
4346 __kfree_skb(skb);
daa86548
ED
4347 break;
4348
5b252f0c
BH
4349 case GRO_HELD:
4350 case GRO_MERGED:
4351 break;
5d38a079
HX
4352 }
4353
c7c4b3b6 4354 return ret;
5d0d9be8 4355}
5d0d9be8 4356
c7c4b3b6 4357gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4358{
ae78dbfa 4359 trace_napi_gro_receive_entry(skb);
86911732 4360
a50e233c
ED
4361 skb_gro_reset_offset(skb);
4362
89c5fa33 4363 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
4364}
4365EXPORT_SYMBOL(napi_gro_receive);
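
Sketch of the usual GRO-enabled delivery step inside a driver's ->poll(); napi_gro_receive() either holds/merges the skb on napi->gro_list or passes it up to the stack immediately.

/* Process one received frame; the skb is assumed fully built. */
static void my_rx_one(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);	/* GRO_HELD/GRO_MERGED/GRO_NORMAL */
}
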
4366
d0c2b0d2 4367static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 4368{
93a35f59
ED
4369 if (unlikely(skb->pfmemalloc)) {
4370 consume_skb(skb);
4371 return;
4372 }
96e93eab 4373 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
4374 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4375 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 4376 skb->vlan_tci = 0;
66c46d74 4377 skb->dev = napi->dev;
6d152e23 4378 skb->skb_iif = 0;
c3caf119
JC
4379 skb->encapsulation = 0;
4380 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 4381 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
96e93eab
HX
4382
4383 napi->skb = skb;
4384}
96e93eab 4385
76620aaf 4386struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 4387{
5d38a079 4388 struct sk_buff *skb = napi->skb;
5d38a079
HX
4389
4390 if (!skb) {
fd11a83d 4391 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
84b9cd63 4392 napi->skb = skb;
80595d59 4393 }
96e93eab
HX
4394 return skb;
4395}
76620aaf 4396EXPORT_SYMBOL(napi_get_frags);
96e93eab 4397
a50e233c
ED
4398static gro_result_t napi_frags_finish(struct napi_struct *napi,
4399 struct sk_buff *skb,
4400 gro_result_t ret)
96e93eab 4401{
5d0d9be8
HX
4402 switch (ret) {
4403 case GRO_NORMAL:
a50e233c
ED
4404 case GRO_HELD:
4405 __skb_push(skb, ETH_HLEN);
4406 skb->protocol = eth_type_trans(skb, skb->dev);
4407 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 4408 ret = GRO_DROP;
86911732 4409 break;
5d38a079 4410
5d0d9be8 4411 case GRO_DROP:
5d0d9be8
HX
4412 case GRO_MERGED_FREE:
4413 napi_reuse_skb(napi, skb);
4414 break;
5b252f0c
BH
4415
4416 case GRO_MERGED:
4417 break;
5d0d9be8 4418 }
5d38a079 4419
c7c4b3b6 4420 return ret;
5d38a079 4421}
5d0d9be8 4422
a50e233c
ED
4423/* Upper GRO stack assumes network header starts at gro_offset=0
4424 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4425 * We copy ethernet header into skb->data to have a common layout.
4426 */
4adb9c4a 4427static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
4428{
4429 struct sk_buff *skb = napi->skb;
a50e233c
ED
4430 const struct ethhdr *eth;
4431 unsigned int hlen = sizeof(*eth);
76620aaf
HX
4432
4433 napi->skb = NULL;
4434
a50e233c
ED
4435 skb_reset_mac_header(skb);
4436 skb_gro_reset_offset(skb);
4437
4438 eth = skb_gro_header_fast(skb, 0);
4439 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4440 eth = skb_gro_header_slow(skb, hlen, 0);
4441 if (unlikely(!eth)) {
4442 napi_reuse_skb(napi, skb);
4443 return NULL;
4444 }
4445 } else {
4446 gro_pull_from_frag0(skb, hlen);
4447 NAPI_GRO_CB(skb)->frag0 += hlen;
4448 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 4449 }
a50e233c
ED
4450 __skb_pull(skb, hlen);
4451
4452 /*
4453 * This works because the only protocols we care about don't require
4454 * special handling.
4455 * We'll fix it up properly in napi_frags_finish()
4456 */
4457 skb->protocol = eth->h_proto;
76620aaf 4458
76620aaf
HX
4459 return skb;
4460}
76620aaf 4461
c7c4b3b6 4462gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 4463{
76620aaf 4464 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
4465
4466 if (!skb)
c7c4b3b6 4467 return GRO_DROP;
5d0d9be8 4468
ae78dbfa
BH
4469 trace_napi_gro_frags_entry(skb);
4470
89c5fa33 4471 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 4472}
5d38a079
HX
4473EXPORT_SYMBOL(napi_gro_frags);
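
Rough sketch of the page-based GRO entry point: drivers that receive directly into pages take a ready-made skb from napi_get_frags(), attach the buffer as a fragment and hand it back via napi_gro_frags(), which pulls the Ethernet header and sets skb->protocol. Sizes and page accounting are simplified assumptions.

static void my_rx_page(struct napi_struct *napi, struct page *page,
		       unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);
		return;
	}

	/* The frame (including the Ethernet header) lives in the page. */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;	/* assuming one page per buffer */

	napi_gro_frags(napi);
}
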
4474
573e8fca
TH
4475/* Compute the checksum from gro_offset and return the folded value
4476 * after adding in any pseudo checksum.
4477 */
4478__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4479{
4480 __wsum wsum;
4481 __sum16 sum;
4482
4483 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4484
4485 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4486 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4487 if (likely(!sum)) {
4488 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4489 !skb->csum_complete_sw)
4490 netdev_rx_csum_fault(skb->dev);
4491 }
4492
4493 NAPI_GRO_CB(skb)->csum = wsum;
4494 NAPI_GRO_CB(skb)->csum_valid = 1;
4495
4496 return sum;
4497}
4498EXPORT_SYMBOL(__skb_gro_checksum_complete);
4499
e326bed2 4500/*
855abcf0 4501 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
e326bed2
ED
4502 * Note: called with local irq disabled, but exits with local irq enabled.
4503 */
4504static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4505{
4506#ifdef CONFIG_RPS
4507 struct softnet_data *remsd = sd->rps_ipi_list;
4508
4509 if (remsd) {
4510 sd->rps_ipi_list = NULL;
4511
4512 local_irq_enable();
4513
4514 /* Send pending IPI's to kick RPS processing on remote cpus. */
4515 while (remsd) {
4516 struct softnet_data *next = remsd->rps_ipi_next;
4517
4518 if (cpu_online(remsd->cpu))
c46fff2a 4519 smp_call_function_single_async(remsd->cpu,
fce8ad15 4520 &remsd->csd);
e326bed2
ED
4521 remsd = next;
4522 }
4523 } else
4524#endif
4525 local_irq_enable();
4526}
4527
d75b1ade
ED
4528static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4529{
4530#ifdef CONFIG_RPS
4531 return sd->rps_ipi_list != NULL;
4532#else
4533 return false;
4534#endif
4535}
4536
bea3348e 4537static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
4538{
4539 int work = 0;
eecfd7c4 4540 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 4541
e326bed2
ED
4542 /* If we have pending IPIs, it is better to send them now
4543 * than to wait until net_rx_action() ends.
4544 */
d75b1ade 4545 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
4546 local_irq_disable();
4547 net_rps_action_and_irq_enable(sd);
4548 }
d75b1ade 4549
bea3348e 4550 napi->weight = weight_p;
6e7676c1 4551 local_irq_disable();
11ef7a89 4552 while (1) {
1da177e4 4553 struct sk_buff *skb;
6e7676c1
CG
4554
4555 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 4556 rcu_read_lock();
6e7676c1
CG
4557 local_irq_enable();
4558 __netif_receive_skb(skb);
2c17d27c 4559 rcu_read_unlock();
6e7676c1 4560 local_irq_disable();
76cc8b13
TH
4561 input_queue_head_incr(sd);
4562 if (++work >= quota) {
4563 local_irq_enable();
4564 return work;
4565 }
6e7676c1 4566 }
1da177e4 4567
e36fa2f7 4568 rps_lock(sd);
11ef7a89 4569 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
4570 /*
4571 * Inline a custom version of __napi_complete().
4572 * Only the current CPU owns and manipulates this napi,
11ef7a89
TH
4573 * and NAPI_STATE_SCHED is the only possible flag set
4574 * on backlog.
4575 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
4576 * and we don't need an smp_mb() memory barrier.
4577 */
eecfd7c4 4578 napi->state = 0;
11ef7a89 4579 rps_unlock(sd);
eecfd7c4 4580
11ef7a89 4581 break;
bea3348e 4582 }
11ef7a89
TH
4583
4584 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4585 &sd->process_queue);
e36fa2f7 4586 rps_unlock(sd);
6e7676c1
CG
4587 }
4588 local_irq_enable();
1da177e4 4589
bea3348e
SH
4590 return work;
4591}
1da177e4 4592
bea3348e
SH
4593/**
4594 * __napi_schedule - schedule for receive
c4ea43c5 4595 * @n: entry to schedule
bea3348e 4596 *
bc9ad166
ED
4597 * The entry's receive function will be scheduled to run.
4598 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 4599 */
b5606c2d 4600void __napi_schedule(struct napi_struct *n)
bea3348e
SH
4601{
4602 unsigned long flags;
1da177e4 4603
bea3348e 4604 local_irq_save(flags);
903ceff7 4605 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 4606 local_irq_restore(flags);
1da177e4 4607}
bea3348e
SH
4608EXPORT_SYMBOL(__napi_schedule);
4609
bc9ad166
ED
4610/**
4611 * __napi_schedule_irqoff - schedule for receive
4612 * @n: entry to schedule
4613 *
4614 * Variant of __napi_schedule() assuming hard irqs are masked
4615 */
4616void __napi_schedule_irqoff(struct napi_struct *n)
4617{
4618 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4619}
4620EXPORT_SYMBOL(__napi_schedule_irqoff);
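
Sketch of the canonical interrupt handler pairing with these helpers: mask further RX interrupts, then schedule NAPI. __napi_schedule_irqoff() is used because hard irqs are already disabled in the handler; the ring structure and the irq-masking helper are placeholders.

struct my_ring {
	struct napi_struct napi;
	/* ... descriptor ring state ... */
};

static void my_disable_rx_irq(struct my_ring *ring);	/* hypothetical */

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_ring *ring = dev_id;

	if (napi_schedule_prep(&ring->napi)) {
		my_disable_rx_irq(ring);
		__napi_schedule_irqoff(&ring->napi);
	}
	return IRQ_HANDLED;
}
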
4621
d565b0a1
HX
4622void __napi_complete(struct napi_struct *n)
4623{
4624 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
d565b0a1 4625
d75b1ade 4626 list_del_init(&n->poll_list);
4e857c58 4627 smp_mb__before_atomic();
d565b0a1
HX
4628 clear_bit(NAPI_STATE_SCHED, &n->state);
4629}
4630EXPORT_SYMBOL(__napi_complete);
4631
3b47d303 4632void napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1
HX
4633{
4634 unsigned long flags;
4635
4636 /*
4637 * don't let napi dequeue from the cpu poll list
4638 * just in case it's running on a different cpu
4639 */
4640 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4641 return;
4642
3b47d303
ED
4643 if (n->gro_list) {
4644 unsigned long timeout = 0;
d75b1ade 4645
3b47d303
ED
4646 if (work_done)
4647 timeout = n->dev->gro_flush_timeout;
4648
4649 if (timeout)
4650 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4651 HRTIMER_MODE_REL_PINNED);
4652 else
4653 napi_gro_flush(n, false);
4654 }
d75b1ade
ED
4655 if (likely(list_empty(&n->poll_list))) {
4656 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4657 } else {
4658 /* If n->poll_list is not empty, we need to mask irqs */
4659 local_irq_save(flags);
4660 __napi_complete(n);
4661 local_irq_restore(flags);
4662 }
d565b0a1 4663}
3b47d303 4664EXPORT_SYMBOL(napi_complete_done);
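
Sketch of the completion contract seen from a driver's ->poll(): napi_complete_done() is called only when less than the full budget was consumed, after which device interrupts are re-armed. If the netdevice has a non-zero gro_flush_timeout, held GRO packets are flushed later by the napi watchdog hrtimer rather than here. The two my_ helpers stand in for driver-specific work.

static int my_clean_rx_ring(struct napi_struct *napi, int budget);
static void my_enable_rx_irq(struct napi_struct *napi);

static int my_poll(struct napi_struct *napi, int budget)
{
	int work = my_clean_rx_ring(napi, budget);

	if (work < budget) {
		napi_complete_done(napi, work);
		my_enable_rx_irq(napi);
	}
	return work;		/* never report more than budget */
}
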
d565b0a1 4665
af12fa6e 4666/* must be called under rcu_read_lock(), as we dont take a reference */
02d62e86 4667static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
4668{
4669 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4670 struct napi_struct *napi;
4671
4672 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4673 if (napi->napi_id == napi_id)
4674 return napi;
4675
4676 return NULL;
4677}
02d62e86
ED
4678
4679#if defined(CONFIG_NET_RX_BUSY_POLL)
ce6aea93 4680#define BUSY_POLL_BUDGET 8
02d62e86
ED
4681bool sk_busy_loop(struct sock *sk, int nonblock)
4682{
4683 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
ce6aea93 4684 int (*busy_poll)(struct napi_struct *dev);
02d62e86
ED
4685 struct napi_struct *napi;
4686 int rc = false;
4687
2a028ecb 4688 rcu_read_lock();
02d62e86
ED
4689
4690 napi = napi_by_id(sk->sk_napi_id);
4691 if (!napi)
4692 goto out;
4693
ce6aea93
ED
4694 /* Note: ndo_busy_poll method is optional in linux-4.5 */
4695 busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
02d62e86
ED
4696
4697 do {
ce6aea93 4698 rc = 0;
2a028ecb 4699 local_bh_disable();
ce6aea93
ED
4700 if (busy_poll) {
4701 rc = busy_poll(napi);
4702 } else if (napi_schedule_prep(napi)) {
4703 void *have = netpoll_poll_lock(napi);
4704
4705 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4706 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4707 trace_napi_poll(napi);
4708 if (rc == BUSY_POLL_BUDGET) {
4709 napi_complete_done(napi, rc);
4710 napi_schedule(napi);
4711 }
4712 }
4713 netpoll_poll_unlock(have);
4714 }
2a028ecb
ED
4715 if (rc > 0)
4716 NET_ADD_STATS_BH(sock_net(sk),
4717 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4718 local_bh_enable();
02d62e86
ED
4719
4720 if (rc == LL_FLUSH_FAILED)
4721 break; /* permanent failure */
4722
02d62e86 4723 cpu_relax();
02d62e86
ED
4724 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4725 !need_resched() && !busy_loop_timeout(end_time));
4726
4727 rc = !skb_queue_empty(&sk->sk_receive_queue);
4728out:
2a028ecb 4729 rcu_read_unlock();
02d62e86
ED
4730 return rc;
4731}
4732EXPORT_SYMBOL(sk_busy_loop);
4733
4734#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e
ET
4735
4736void napi_hash_add(struct napi_struct *napi)
4737{
52bd2d62
ED
4738 if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
4739 return;
af12fa6e 4740
52bd2d62 4741 spin_lock(&napi_hash_lock);
af12fa6e 4742
52bd2d62
ED
4743 /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4744 do {
4745 if (unlikely(++napi_gen_id < NR_CPUS + 1))
4746 napi_gen_id = NR_CPUS + 1;
4747 } while (napi_by_id(napi_gen_id));
4748 napi->napi_id = napi_gen_id;
af12fa6e 4749
52bd2d62
ED
4750 hlist_add_head_rcu(&napi->napi_hash_node,
4751 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 4752
52bd2d62 4753 spin_unlock(&napi_hash_lock);
af12fa6e
ET
4754}
4755EXPORT_SYMBOL_GPL(napi_hash_add);
4756
4757/* Warning: the caller is responsible for making sure an RCU grace period
4758 * has elapsed before freeing the memory containing @napi.
4759 */
4760void napi_hash_del(struct napi_struct *napi)
4761{
4762 spin_lock(&napi_hash_lock);
4763
4764 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4765 hlist_del_rcu(&napi->napi_hash_node);
4766
4767 spin_unlock(&napi_hash_lock);
4768}
4769EXPORT_SYMBOL_GPL(napi_hash_del);
4770
3b47d303
ED
4771static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4772{
4773 struct napi_struct *napi;
4774
4775 napi = container_of(timer, struct napi_struct, timer);
4776 if (napi->gro_list)
4777 napi_schedule(napi);
4778
4779 return HRTIMER_NORESTART;
4780}
4781
d565b0a1
HX
4782void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4783 int (*poll)(struct napi_struct *, int), int weight)
4784{
4785 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
4786 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4787 napi->timer.function = napi_watchdog;
4ae5544f 4788 napi->gro_count = 0;
d565b0a1 4789 napi->gro_list = NULL;
5d38a079 4790 napi->skb = NULL;
d565b0a1 4791 napi->poll = poll;
82dc3c63
ED
4792 if (weight > NAPI_POLL_WEIGHT)
4793 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4794 weight, dev->name);
d565b0a1
HX
4795 napi->weight = weight;
4796 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 4797 napi->dev = dev;
5d38a079 4798#ifdef CONFIG_NETPOLL
d565b0a1
HX
4799 spin_lock_init(&napi->poll_lock);
4800 napi->poll_owner = -1;
4801#endif
4802 set_bit(NAPI_STATE_SCHED, &napi->state);
4803}
4804EXPORT_SYMBOL(netif_napi_add);
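
Sketch of NAPI lifetime management around netif_napi_add(): registration typically happens at probe/setup time with the default NAPI_POLL_WEIGHT (larger values trigger the pr_err_once() above), napi_enable() runs when the interface is brought up, and teardown reverses the order. The struct my_ring and my_poll() are the hypothetical names from the earlier sketches.

static int my_setup_napi(struct net_device *dev, struct my_ring *ring)
{
	netif_napi_add(dev, &ring->napi, my_poll, NAPI_POLL_WEIGHT);
	return 0;
}

static void my_open_napi(struct my_ring *ring)
{
	napi_enable(&ring->napi);	/* clears NAPI_STATE_SCHED */
}

static void my_teardown_napi(struct my_ring *ring)
{
	napi_disable(&ring->napi);	/* waits for a running poll to stop */
	netif_napi_del(&ring->napi);
}
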
4805
3b47d303
ED
4806void napi_disable(struct napi_struct *n)
4807{
4808 might_sleep();
4809 set_bit(NAPI_STATE_DISABLE, &n->state);
4810
4811 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4812 msleep(1);
2d8bff12
NH
4813 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4814 msleep(1);
3b47d303
ED
4815
4816 hrtimer_cancel(&n->timer);
4817
4818 clear_bit(NAPI_STATE_DISABLE, &n->state);
4819}
4820EXPORT_SYMBOL(napi_disable);
4821
d565b0a1
HX
4822void netif_napi_del(struct napi_struct *napi)
4823{
d7b06636 4824 list_del_init(&napi->dev_list);
76620aaf 4825 napi_free_frags(napi);
d565b0a1 4826
289dccbe 4827 kfree_skb_list(napi->gro_list);
d565b0a1 4828 napi->gro_list = NULL;
4ae5544f 4829 napi->gro_count = 0;
d565b0a1
HX
4830}
4831EXPORT_SYMBOL(netif_napi_del);
4832
726ce70e
HX
4833static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4834{
4835 void *have;
4836 int work, weight;
4837
4838 list_del_init(&n->poll_list);
4839
4840 have = netpoll_poll_lock(n);
4841
4842 weight = n->weight;
4843
4844 /* This NAPI_STATE_SCHED test is for avoiding a race
4845 * with netpoll's poll_napi(). Only the entity which
4846 * obtains the lock and sees NAPI_STATE_SCHED set will
4847 * actually make the ->poll() call. Therefore we avoid
4848 * accidentally calling ->poll() when NAPI is not scheduled.
4849 */
4850 work = 0;
4851 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4852 work = n->poll(n, weight);
4853 trace_napi_poll(n);
4854 }
4855
4856 WARN_ON_ONCE(work > weight);
4857
4858 if (likely(work < weight))
4859 goto out_unlock;
4860
4861 /* Drivers must not modify the NAPI state if they
4862 * consume the entire weight. In such cases this code
4863 * still "owns" the NAPI instance and therefore can
4864 * move the instance around on the list at-will.
4865 */
4866 if (unlikely(napi_disable_pending(n))) {
4867 napi_complete(n);
4868 goto out_unlock;
4869 }
4870
4871 if (n->gro_list) {
4872 /* flush too old packets
4873 * If HZ < 1000, flush all packets.
4874 */
4875 napi_gro_flush(n, HZ >= 1000);
4876 }
4877
001ce546
HX
4878 /* Some drivers may have called napi_schedule
4879 * prior to exhausting their budget.
4880 */
4881 if (unlikely(!list_empty(&n->poll_list))) {
4882 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4883 n->dev ? n->dev->name : "backlog");
4884 goto out_unlock;
4885 }
4886
726ce70e
HX
4887 list_add_tail(&n->poll_list, repoll);
4888
4889out_unlock:
4890 netpoll_poll_unlock(have);
4891
4892 return work;
4893}
4894
1da177e4
LT
4895static void net_rx_action(struct softirq_action *h)
4896{
903ceff7 4897 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
24f8b238 4898 unsigned long time_limit = jiffies + 2;
51b0bded 4899 int budget = netdev_budget;
d75b1ade
ED
4900 LIST_HEAD(list);
4901 LIST_HEAD(repoll);
53fb95d3 4902
1da177e4 4903 local_irq_disable();
d75b1ade
ED
4904 list_splice_init(&sd->poll_list, &list);
4905 local_irq_enable();
1da177e4 4906
ceb8d5bf 4907 for (;;) {
bea3348e 4908 struct napi_struct *n;
1da177e4 4909
ceb8d5bf
HX
4910 if (list_empty(&list)) {
4911 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4912 return;
4913 break;
4914 }
4915
6bd373eb
HX
4916 n = list_first_entry(&list, struct napi_struct, poll_list);
4917 budget -= napi_poll(n, &repoll);
4918
d75b1ade 4919 /* If softirq window is exhausted then punt.
24f8b238
SH
4920 * Allow this to run for 2 jiffies, which allows
4921 * an average latency of 1.5/HZ.
bea3348e 4922 */
ceb8d5bf
HX
4923 if (unlikely(budget <= 0 ||
4924 time_after_eq(jiffies, time_limit))) {
4925 sd->time_squeeze++;
4926 break;
4927 }
1da177e4 4928 }
d75b1ade 4929
d75b1ade
ED
4930 local_irq_disable();
4931
4932 list_splice_tail_init(&sd->poll_list, &list);
4933 list_splice_tail(&repoll, &list);
4934 list_splice(&list, &sd->poll_list);
4935 if (!list_empty(&sd->poll_list))
4936 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4937
e326bed2 4938 net_rps_action_and_irq_enable(sd);
1da177e4
LT
4939}
4940
aa9d8560 4941struct netdev_adjacent {
9ff162a8 4942 struct net_device *dev;
5d261913
VF
4943
4944 /* upper master flag, there can only be one master device per list */
9ff162a8 4945 bool master;
5d261913 4946
5d261913
VF
4947 /* counter for the number of times this device was added to us */
4948 u16 ref_nr;
4949
402dae96
VF
4950 /* private field for the users */
4951 void *private;
4952
9ff162a8
JP
4953 struct list_head list;
4954 struct rcu_head rcu;
9ff162a8
JP
4955};
4956
6ea29da1 4957static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 4958 struct list_head *adj_list)
9ff162a8 4959{
5d261913 4960 struct netdev_adjacent *adj;
5d261913 4961
2f268f12 4962 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
4963 if (adj->dev == adj_dev)
4964 return adj;
9ff162a8
JP
4965 }
4966 return NULL;
4967}
4968
4969/**
4970 * netdev_has_upper_dev - Check if device is linked to an upper device
4971 * @dev: device
4972 * @upper_dev: upper device to check
4973 *
4974 * Find out if a device is linked to the specified upper device and return true
4975 * if it is. Note that this checks only the immediate upper device,
4976 * not through a complete stack of devices. The caller must hold the RTNL lock.
4977 */
4978bool netdev_has_upper_dev(struct net_device *dev,
4979 struct net_device *upper_dev)
4980{
4981 ASSERT_RTNL();
4982
6ea29da1 4983 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
9ff162a8
JP
4984}
4985EXPORT_SYMBOL(netdev_has_upper_dev);
4986
4987/**
4988 * netdev_has_any_upper_dev - Check if device is linked to some device
4989 * @dev: device
4990 *
4991 * Find out if a device is linked to an upper device and return true in case
4992 * it is. The caller must hold the RTNL lock.
4993 */
1d143d9f 4994static bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
4995{
4996 ASSERT_RTNL();
4997
2f268f12 4998 return !list_empty(&dev->all_adj_list.upper);
9ff162a8 4999}
9ff162a8
JP
5000
5001/**
5002 * netdev_master_upper_dev_get - Get master upper device
5003 * @dev: device
5004 *
5005 * Find a master upper device and return pointer to it or NULL in case
5006 * it's not there. The caller must hold the RTNL lock.
5007 */
5008struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5009{
aa9d8560 5010 struct netdev_adjacent *upper;
9ff162a8
JP
5011
5012 ASSERT_RTNL();
5013
2f268f12 5014 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
5015 return NULL;
5016
2f268f12 5017 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 5018 struct netdev_adjacent, list);
9ff162a8
JP
5019 if (likely(upper->master))
5020 return upper->dev;
5021 return NULL;
5022}
5023EXPORT_SYMBOL(netdev_master_upper_dev_get);
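
Sketch of a typical RTNL-context check built on netdev_master_upper_dev_get(), for example from a NETDEV_CHANGEUPPER notifier, to see whether a device has been enslaved to a bond or bridge master.

static bool my_is_enslaved(struct net_device *dev)
{
	struct net_device *master;

	ASSERT_RTNL();
	master = netdev_master_upper_dev_get(dev);
	if (master)
		netdev_info(dev, "enslaved to %s\n", master->name);
	return master != NULL;
}
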
5024
b6ccba4c
VF
5025void *netdev_adjacent_get_private(struct list_head *adj_list)
5026{
5027 struct netdev_adjacent *adj;
5028
5029 adj = list_entry(adj_list, struct netdev_adjacent, list);
5030
5031 return adj->private;
5032}
5033EXPORT_SYMBOL(netdev_adjacent_get_private);
5034
44a40855
VY
5035/**
5036 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5037 * @dev: device
5038 * @iter: list_head ** of the current position
5039 *
5040 * Gets the next device from the dev's upper list, starting from iter
5041 * position. The caller must hold RCU read lock.
5042 */
5043struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5044 struct list_head **iter)
5045{
5046 struct netdev_adjacent *upper;
5047
5048 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5049
5050 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5051
5052 if (&upper->list == &dev->adj_list.upper)
5053 return NULL;
5054
5055 *iter = &upper->list;
5056
5057 return upper->dev;
5058}
5059EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
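
Sketch of walking the immediate upper devices under RCU. The iterator is primed with &dev->adj_list.upper, which is what the in-tree netdev_for_each_upper_dev_rcu() wrapper does; that priming convention is assumed here rather than shown in this file.

static void my_walk_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		netdev_dbg(dev, "upper device: %s\n", upper->name);
	rcu_read_unlock();
}
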
5060
31088a11
VF
5061/**
5062 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
48311f46
VF
5063 * @dev: device
5064 * @iter: list_head ** of the current position
5065 *
5066 * Gets the next device from the dev's upper list, starting from iter
5067 * position. The caller must hold RCU read lock.
5068 */
2f268f12
VF
5069struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5070 struct list_head **iter)
48311f46
VF
5071{
5072 struct netdev_adjacent *upper;
5073
85328240 5074 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
48311f46
VF
5075
5076 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5077
2f268f12 5078 if (&upper->list == &dev->all_adj_list.upper)
48311f46
VF
5079 return NULL;
5080
5081 *iter = &upper->list;
5082
5083 return upper->dev;
5084}
2f268f12 5085EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
48311f46 5086
31088a11
VF
5087/**
5088 * netdev_lower_get_next_private - Get the next ->private from the
5089 * lower neighbour list
5090 * @dev: device
5091 * @iter: list_head ** of the current position
5092 *
5093 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5094 * list, starting from iter position. The caller must either hold the
5095 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 5096 * list will remain unchanged.
31088a11
VF
5097 */
5098void *netdev_lower_get_next_private(struct net_device *dev,
5099 struct list_head **iter)
5100{
5101 struct netdev_adjacent *lower;
5102
5103 lower = list_entry(*iter, struct netdev_adjacent, list);
5104
5105 if (&lower->list == &dev->adj_list.lower)
5106 return NULL;
5107
6859e7df 5108 *iter = lower->list.next;
31088a11
VF
5109
5110 return lower->private;
5111}
5112EXPORT_SYMBOL(netdev_lower_get_next_private);
5113
5114/**
5115 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5116 * lower neighbour list, RCU
5117 * variant
5118 * @dev: device
5119 * @iter: list_head ** of the current position
5120 *
5121 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5122 * list, starting from iter position. The caller must hold RCU read lock.
5123 */
5124void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5125 struct list_head **iter)
5126{
5127 struct netdev_adjacent *lower;
5128
5129 WARN_ON_ONCE(!rcu_read_lock_held());
5130
5131 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5132
5133 if (&lower->list == &dev->adj_list.lower)
5134 return NULL;
5135
6859e7df 5136 *iter = &lower->list;
31088a11
VF
5137
5138 return lower->private;
5139}
5140EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5141
4085ebe8
VY
5142/**
5143 * netdev_lower_get_next - Get the next device from the lower neighbour
5144 * list
5145 * @dev: device
5146 * @iter: list_head ** of the current position
5147 *
5148 * Gets the next netdev_adjacent from the dev's lower neighbour
5149 * list, starting from iter position. The caller must hold RTNL lock or
5150 * its own locking that guarantees that the neighbour lower
b469139e 5151 * list will remain unchanged.
4085ebe8
VY
5152 */
5153void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5154{
5155 struct netdev_adjacent *lower;
5156
5157 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5158
5159 if (&lower->list == &dev->adj_list.lower)
5160 return NULL;
5161
5162 *iter = &lower->list;
5163
5164 return lower->dev;
5165}
5166EXPORT_SYMBOL(netdev_lower_get_next);
5167
e001bfad 5168/**
5169 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5170 * lower neighbour list, RCU
5171 * variant
5172 * @dev: device
5173 *
5174 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5175 * list. The caller must hold RCU read lock.
5176 */
5177void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5178{
5179 struct netdev_adjacent *lower;
5180
5181 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5182 struct netdev_adjacent, list);
5183 if (lower)
5184 return lower->private;
5185 return NULL;
5186}
5187EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5188
9ff162a8
JP
5189/**
5190 * netdev_master_upper_dev_get_rcu - Get master upper device
5191 * @dev: device
5192 *
5193 * Find a master upper device and return pointer to it or NULL in case
5194 * it's not there. The caller must hold the RCU read lock.
5195 */
5196struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5197{
aa9d8560 5198 struct netdev_adjacent *upper;
9ff162a8 5199
2f268f12 5200 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 5201 struct netdev_adjacent, list);
9ff162a8
JP
5202 if (upper && likely(upper->master))
5203 return upper->dev;
5204 return NULL;
5205}
5206EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5207
0a59f3a9 5208static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
5209 struct net_device *adj_dev,
5210 struct list_head *dev_list)
5211{
5212 char linkname[IFNAMSIZ+7];
5213 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5214 "upper_%s" : "lower_%s", adj_dev->name);
5215 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5216 linkname);
5217}
0a59f3a9 5218static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
5219 char *name,
5220 struct list_head *dev_list)
5221{
5222 char linkname[IFNAMSIZ+7];
5223 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5224 "upper_%s" : "lower_%s", name);
5225 sysfs_remove_link(&(dev->dev.kobj), linkname);
5226}
5227
7ce64c79
AF
5228static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5229 struct net_device *adj_dev,
5230 struct list_head *dev_list)
5231{
5232 return (dev_list == &dev->adj_list.upper ||
5233 dev_list == &dev->adj_list.lower) &&
5234 net_eq(dev_net(dev), dev_net(adj_dev));
5235}
3ee32707 5236
5d261913
VF
5237static int __netdev_adjacent_dev_insert(struct net_device *dev,
5238 struct net_device *adj_dev,
7863c054 5239 struct list_head *dev_list,
402dae96 5240 void *private, bool master)
5d261913
VF
5241{
5242 struct netdev_adjacent *adj;
842d67a7 5243 int ret;
5d261913 5244
6ea29da1 5245 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
5246
5247 if (adj) {
5d261913
VF
5248 adj->ref_nr++;
5249 return 0;
5250 }
5251
5252 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5253 if (!adj)
5254 return -ENOMEM;
5255
5256 adj->dev = adj_dev;
5257 adj->master = master;
5d261913 5258 adj->ref_nr = 1;
402dae96 5259 adj->private = private;
5d261913 5260 dev_hold(adj_dev);
2f268f12
VF
5261
5262 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5263 adj_dev->name, dev->name, adj_dev->name);
5d261913 5264
7ce64c79 5265 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 5266 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
5267 if (ret)
5268 goto free_adj;
5269 }
5270
7863c054 5271 /* Ensure that master link is always the first item in list. */
842d67a7
VF
5272 if (master) {
5273 ret = sysfs_create_link(&(dev->dev.kobj),
5274 &(adj_dev->dev.kobj), "master");
5275 if (ret)
5831d66e 5276 goto remove_symlinks;
842d67a7 5277
7863c054 5278 list_add_rcu(&adj->list, dev_list);
842d67a7 5279 } else {
7863c054 5280 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 5281 }
5d261913
VF
5282
5283 return 0;
842d67a7 5284
5831d66e 5285remove_symlinks:
7ce64c79 5286 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5287 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
5288free_adj:
5289 kfree(adj);
974daef7 5290 dev_put(adj_dev);
842d67a7
VF
5291
5292 return ret;
5d261913
VF
5293}
5294
1d143d9f 5295static void __netdev_adjacent_dev_remove(struct net_device *dev,
5296 struct net_device *adj_dev,
5297 struct list_head *dev_list)
5d261913
VF
5298{
5299 struct netdev_adjacent *adj;
5300
6ea29da1 5301 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 5302
2f268f12
VF
5303 if (!adj) {
5304 pr_err("tried to remove device %s from %s\n",
5305 dev->name, adj_dev->name);
5d261913 5306 BUG();
2f268f12 5307 }
5d261913
VF
5308
5309 if (adj->ref_nr > 1) {
2f268f12
VF
5310 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5311 adj->ref_nr-1);
5d261913
VF
5312 adj->ref_nr--;
5313 return;
5314 }
5315
842d67a7
VF
5316 if (adj->master)
5317 sysfs_remove_link(&(dev->dev.kobj), "master");
5318
7ce64c79 5319 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5320 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 5321
5d261913 5322 list_del_rcu(&adj->list);
2f268f12
VF
5323 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5324 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
5325 dev_put(adj_dev);
5326 kfree_rcu(adj, rcu);
5327}
5328
1d143d9f 5329static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5330 struct net_device *upper_dev,
5331 struct list_head *up_list,
5332 struct list_head *down_list,
5333 void *private, bool master)
5d261913
VF
5334{
5335 int ret;
5336
402dae96
VF
5337 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5338 master);
5d261913
VF
5339 if (ret)
5340 return ret;
5341
402dae96
VF
5342 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5343 false);
5d261913 5344 if (ret) {
2f268f12 5345 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5d261913
VF
5346 return ret;
5347 }
5348
5349 return 0;
5350}
5351
1d143d9f 5352static int __netdev_adjacent_dev_link(struct net_device *dev,
5353 struct net_device *upper_dev)
5d261913 5354{
2f268f12
VF
5355 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5356 &dev->all_adj_list.upper,
5357 &upper_dev->all_adj_list.lower,
402dae96 5358 NULL, false);
5d261913
VF
5359}
5360
1d143d9f 5361static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5362 struct net_device *upper_dev,
5363 struct list_head *up_list,
5364 struct list_head *down_list)
5d261913 5365{
2f268f12
VF
5366 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5367 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5d261913
VF
5368}
5369
1d143d9f 5370static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5371 struct net_device *upper_dev)
5d261913 5372{
2f268f12
VF
5373 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5374 &dev->all_adj_list.upper,
5375 &upper_dev->all_adj_list.lower);
5376}
5377
1d143d9f 5378static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5379 struct net_device *upper_dev,
5380 void *private, bool master)
2f268f12
VF
5381{
5382 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5383
5384 if (ret)
5385 return ret;
5386
5387 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5388 &dev->adj_list.upper,
5389 &upper_dev->adj_list.lower,
402dae96 5390 private, master);
2f268f12
VF
5391 if (ret) {
5392 __netdev_adjacent_dev_unlink(dev, upper_dev);
5393 return ret;
5394 }
5395
5396 return 0;
5d261913
VF
5397}
5398
1d143d9f 5399static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5400 struct net_device *upper_dev)
2f268f12
VF
5401{
5402 __netdev_adjacent_dev_unlink(dev, upper_dev);
5403 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5404 &dev->adj_list.upper,
5405 &upper_dev->adj_list.lower);
5406}
5d261913 5407
9ff162a8 5408static int __netdev_upper_dev_link(struct net_device *dev,
402dae96
VF
5409 struct net_device *upper_dev, bool master,
5410 void *private)
9ff162a8 5411{
0e4ead9d 5412 struct netdev_notifier_changeupper_info changeupper_info;
5d261913
VF
5413 struct netdev_adjacent *i, *j, *to_i, *to_j;
5414 int ret = 0;
9ff162a8
JP
5415
5416 ASSERT_RTNL();
5417
5418 if (dev == upper_dev)
5419 return -EBUSY;
5420
 5421 /* To prevent loops, check that dev is not already an upper device of upper_dev. */
6ea29da1 5422 if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
9ff162a8
JP
5423 return -EBUSY;
5424
6ea29da1 5425 if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
9ff162a8
JP
5426 return -EEXIST;
5427
5428 if (master && netdev_master_upper_dev_get(dev))
5429 return -EBUSY;
5430
0e4ead9d
JP
5431 changeupper_info.upper_dev = upper_dev;
5432 changeupper_info.master = master;
5433 changeupper_info.linking = true;
5434
573c7ba0
JP
5435 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5436 &changeupper_info.info);
5437 ret = notifier_to_errno(ret);
5438 if (ret)
5439 return ret;
5440
402dae96
VF
5441 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5442 master);
5d261913
VF
5443 if (ret)
5444 return ret;
9ff162a8 5445
5d261913 5446 /* Now that we linked these devs, make all the upper_dev's
2f268f12 5447 * all_adj_list.upper visible to every dev's all_adj_list.lower and
5d261913
VF
 5448 * vice versa, and don't forget the devices themselves. All of these
5449 * links are non-neighbours.
5450 */
2f268f12
VF
5451 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5452 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5453 pr_debug("Interlinking %s with %s, non-neighbour\n",
5454 i->dev->name, j->dev->name);
5d261913
VF
5455 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5456 if (ret)
5457 goto rollback_mesh;
5458 }
5459 }
5460
5461 /* add dev to every upper_dev's upper device */
2f268f12
VF
5462 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5463 pr_debug("linking %s's upper device %s with %s\n",
5464 upper_dev->name, i->dev->name, dev->name);
5d261913
VF
5465 ret = __netdev_adjacent_dev_link(dev, i->dev);
5466 if (ret)
5467 goto rollback_upper_mesh;
5468 }
5469
5470 /* add upper_dev to every dev's lower device */
2f268f12
VF
5471 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5472 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5473 i->dev->name, upper_dev->name);
5d261913
VF
5474 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5475 if (ret)
5476 goto rollback_lower_mesh;
5477 }
9ff162a8 5478
0e4ead9d
JP
5479 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5480 &changeupper_info.info);
9ff162a8 5481 return 0;
5d261913
VF
5482
5483rollback_lower_mesh:
5484 to_i = i;
2f268f12 5485 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5d261913
VF
5486 if (i == to_i)
5487 break;
5488 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5489 }
5490
5491 i = NULL;
5492
5493rollback_upper_mesh:
5494 to_i = i;
2f268f12 5495 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5496 if (i == to_i)
5497 break;
5498 __netdev_adjacent_dev_unlink(dev, i->dev);
5499 }
5500
5501 i = j = NULL;
5502
5503rollback_mesh:
5504 to_i = i;
5505 to_j = j;
2f268f12
VF
5506 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5507 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5508 if (i == to_i && j == to_j)
5509 break;
5510 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5511 }
5512 if (i == to_i)
5513 break;
5514 }
5515
2f268f12 5516 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5517
5518 return ret;
9ff162a8
JP
5519}
5520
5521/**
5522 * netdev_upper_dev_link - Add a link to the upper device
5523 * @dev: device
5524 * @upper_dev: new upper device
5525 *
5526 * Adds a link to device which is upper to this one. The caller must hold
5527 * the RTNL lock. On a failure a negative errno code is returned.
5528 * On success the reference counts are adjusted and the function
5529 * returns zero.
5530 */
5531int netdev_upper_dev_link(struct net_device *dev,
5532 struct net_device *upper_dev)
5533{
402dae96 5534 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
9ff162a8
JP
5535}
5536EXPORT_SYMBOL(netdev_upper_dev_link);
5537
5538/**
5539 * netdev_master_upper_dev_link - Add a master link to the upper device
5540 * @dev: device
5541 * @upper_dev: new upper device
5542 *
5543 * Adds a link to device which is upper to this one. In this case, only
5544 * one master upper device can be linked, although other non-master devices
5545 * might be linked as well. The caller must hold the RTNL lock.
5546 * On a failure a negative errno code is returned. On success the reference
5547 * counts are adjusted and the function returns zero.
5548 */
5549int netdev_master_upper_dev_link(struct net_device *dev,
5550 struct net_device *upper_dev)
5551{
402dae96 5552 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
9ff162a8
JP
5553}
5554EXPORT_SYMBOL(netdev_master_upper_dev_link);
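/*
 * [Editorial example - not part of dev.c] A minimal sketch of how a
 * hypothetical aggregation driver might enslave a lower device with
 * netdev_master_upper_dev_link() and undo the link on teardown with
 * netdev_upper_dev_unlink().  The example_* names are assumptions; as
 * the kernel-doc above states, both calls must run under the RTNL lock.
 */
static int example_master_add_slave(struct net_device *master,
				    struct net_device *slave)
{
	int err;

	ASSERT_RTNL();

	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;

	/* driver-specific slave setup would follow here */
	return 0;
}

static void example_master_del_slave(struct net_device *master,
				     struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}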
5555
402dae96
VF
5556int netdev_master_upper_dev_link_private(struct net_device *dev,
5557 struct net_device *upper_dev,
5558 void *private)
5559{
5560 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5561}
5562EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5563
9ff162a8
JP
5564/**
5565 * netdev_upper_dev_unlink - Removes a link to upper device
5566 * @dev: device
5567 * @upper_dev: new upper device
5568 *
5569 * Removes a link to device which is upper to this one. The caller must hold
5570 * the RTNL lock.
5571 */
5572void netdev_upper_dev_unlink(struct net_device *dev,
5573 struct net_device *upper_dev)
5574{
0e4ead9d 5575 struct netdev_notifier_changeupper_info changeupper_info;
5d261913 5576 struct netdev_adjacent *i, *j;
9ff162a8
JP
5577 ASSERT_RTNL();
5578
0e4ead9d
JP
5579 changeupper_info.upper_dev = upper_dev;
5580 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
5581 changeupper_info.linking = false;
5582
573c7ba0
JP
5583 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5584 &changeupper_info.info);
5585
2f268f12 5586 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5587
5588 /* Here is the tricky part. We must remove all dev's lower
5589 * devices from all upper_dev's upper devices and vice
5590 * versa, to maintain the graph relationship.
5591 */
2f268f12
VF
5592 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5593 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5594 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5595
 5596 /* also remove the devices themselves from the lower/upper device
 5597 * lists
5598 */
2f268f12 5599 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5d261913
VF
5600 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5601
2f268f12 5602 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5603 __netdev_adjacent_dev_unlink(dev, i->dev);
5604
0e4ead9d
JP
5605 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5606 &changeupper_info.info);
9ff162a8
JP
5607}
5608EXPORT_SYMBOL(netdev_upper_dev_unlink);
5609
61bd3857
MS
5610/**
5611 * netdev_bonding_info_change - Dispatch event about slave change
5612 * @dev: device
4a26e453 5613 * @bonding_info: info to dispatch
61bd3857
MS
5614 *
5615 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5616 * The caller must hold the RTNL lock.
5617 */
5618void netdev_bonding_info_change(struct net_device *dev,
5619 struct netdev_bonding_info *bonding_info)
5620{
5621 struct netdev_notifier_bonding_info info;
5622
5623 memcpy(&info.bonding_info, bonding_info,
5624 sizeof(struct netdev_bonding_info));
5625 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5626 &info.info);
5627}
5628EXPORT_SYMBOL(netdev_bonding_info_change);
5629
2ce1ee17 5630static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
5631{
5632 struct netdev_adjacent *iter;
5633
5634 struct net *net = dev_net(dev);
5635
5636 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5637 if (!net_eq(net,dev_net(iter->dev)))
5638 continue;
5639 netdev_adjacent_sysfs_add(iter->dev, dev,
5640 &iter->dev->adj_list.lower);
5641 netdev_adjacent_sysfs_add(dev, iter->dev,
5642 &dev->adj_list.upper);
5643 }
5644
5645 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5646 if (!net_eq(net,dev_net(iter->dev)))
5647 continue;
5648 netdev_adjacent_sysfs_add(iter->dev, dev,
5649 &iter->dev->adj_list.upper);
5650 netdev_adjacent_sysfs_add(dev, iter->dev,
5651 &dev->adj_list.lower);
5652 }
5653}
5654
2ce1ee17 5655static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
5656{
5657 struct netdev_adjacent *iter;
5658
5659 struct net *net = dev_net(dev);
5660
5661 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5662 if (!net_eq(net,dev_net(iter->dev)))
5663 continue;
5664 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5665 &iter->dev->adj_list.lower);
5666 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5667 &dev->adj_list.upper);
5668 }
5669
5670 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5671 if (!net_eq(net,dev_net(iter->dev)))
5672 continue;
5673 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5674 &iter->dev->adj_list.upper);
5675 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5676 &dev->adj_list.lower);
5677 }
5678}
5679
5bb025fa 5680void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 5681{
5bb025fa 5682 struct netdev_adjacent *iter;
402dae96 5683
4c75431a
AF
5684 struct net *net = dev_net(dev);
5685
5bb025fa 5686 list_for_each_entry(iter, &dev->adj_list.upper, list) {
4c75431a
AF
5687 if (!net_eq(net,dev_net(iter->dev)))
5688 continue;
5bb025fa
VF
5689 netdev_adjacent_sysfs_del(iter->dev, oldname,
5690 &iter->dev->adj_list.lower);
5691 netdev_adjacent_sysfs_add(iter->dev, dev,
5692 &iter->dev->adj_list.lower);
5693 }
402dae96 5694
5bb025fa 5695 list_for_each_entry(iter, &dev->adj_list.lower, list) {
4c75431a
AF
5696 if (!net_eq(net,dev_net(iter->dev)))
5697 continue;
5bb025fa
VF
5698 netdev_adjacent_sysfs_del(iter->dev, oldname,
5699 &iter->dev->adj_list.upper);
5700 netdev_adjacent_sysfs_add(iter->dev, dev,
5701 &iter->dev->adj_list.upper);
5702 }
402dae96 5703}
402dae96
VF
5704
5705void *netdev_lower_dev_get_private(struct net_device *dev,
5706 struct net_device *lower_dev)
5707{
5708 struct netdev_adjacent *lower;
5709
5710 if (!lower_dev)
5711 return NULL;
6ea29da1 5712 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
5713 if (!lower)
5714 return NULL;
5715
5716 return lower->private;
5717}
5718EXPORT_SYMBOL(netdev_lower_dev_get_private);
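/*
 * [Editorial example - not part of dev.c] Sketch pairing
 * netdev_master_upper_dev_link_private() with
 * netdev_lower_dev_get_private(): the per-slave structure handed over
 * at link time is what the getter returns later.  struct example_slave
 * and the function names are illustrative assumptions; the caller
 * still owns the private data and frees it after unlinking.
 */
struct example_slave {
	int id;
};

static int example_attach_slave(struct net_device *master,
				struct net_device *slave)
{
	struct example_slave *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	err = netdev_master_upper_dev_link_private(slave, master, priv);
	if (err)
		kfree(priv);
	return err;
}

static struct example_slave *example_slave_info(struct net_device *master,
						struct net_device *slave)
{
	/* returns the pointer passed to ..._link_private(), or NULL */
	return netdev_lower_dev_get_private(master, slave);
}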
5719
4085ebe8
VY
5720
5721int dev_get_nest_level(struct net_device *dev,
5722 bool (*type_check)(struct net_device *dev))
5723{
5724 struct net_device *lower = NULL;
5725 struct list_head *iter;
5726 int max_nest = -1;
5727 int nest;
5728
5729 ASSERT_RTNL();
5730
5731 netdev_for_each_lower_dev(dev, lower, iter) {
5732 nest = dev_get_nest_level(lower, type_check);
5733 if (max_nest < nest)
5734 max_nest = nest;
5735 }
5736
5737 if (type_check(dev))
5738 max_nest++;
5739
5740 return max_nest;
5741}
5742EXPORT_SYMBOL(dev_get_nest_level);
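/*
 * [Editorial example - not part of dev.c] Sketch of the intended use of
 * dev_get_nest_level(): a stacking driver passes a predicate that
 * recognises its own device type and uses the returned depth, e.g. as
 * a lockdep subclass when such devices are stacked on each other.
 * This roughly mirrors how the 802.1q code calls it with is_vlan_dev()
 * (declared in <linux/if_vlan.h>); example_vlan_nest_level() is an
 * illustrative name.
 */
static int example_vlan_nest_level(struct net_device *real_dev)
{
	ASSERT_RTNL();

	/* depth of VLAN-on-VLAN stacking at or below real_dev */
	return dev_get_nest_level(real_dev, is_vlan_dev);
}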
5743
b6c40d68
PM
5744static void dev_change_rx_flags(struct net_device *dev, int flags)
5745{
d314774c
SH
5746 const struct net_device_ops *ops = dev->netdev_ops;
5747
d2615bf4 5748 if (ops->ndo_change_rx_flags)
d314774c 5749 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
5750}
5751
991fb3f7 5752static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 5753{
b536db93 5754 unsigned int old_flags = dev->flags;
d04a48b0
EB
5755 kuid_t uid;
5756 kgid_t gid;
1da177e4 5757
24023451
PM
5758 ASSERT_RTNL();
5759
dad9b335
WC
5760 dev->flags |= IFF_PROMISC;
5761 dev->promiscuity += inc;
5762 if (dev->promiscuity == 0) {
5763 /*
5764 * Avoid overflow.
5765 * If inc causes overflow, untouch promisc and return error.
5766 */
5767 if (inc < 0)
5768 dev->flags &= ~IFF_PROMISC;
5769 else {
5770 dev->promiscuity -= inc;
7b6cd1ce
JP
5771 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5772 dev->name);
dad9b335
WC
5773 return -EOVERFLOW;
5774 }
5775 }
52609c0b 5776 if (dev->flags != old_flags) {
7b6cd1ce
JP
5777 pr_info("device %s %s promiscuous mode\n",
5778 dev->name,
5779 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
5780 if (audit_enabled) {
5781 current_uid_gid(&uid, &gid);
7759db82
KHK
5782 audit_log(current->audit_context, GFP_ATOMIC,
5783 AUDIT_ANOM_PROMISCUOUS,
5784 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5785 dev->name, (dev->flags & IFF_PROMISC),
5786 (old_flags & IFF_PROMISC),
e1760bd5 5787 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
5788 from_kuid(&init_user_ns, uid),
5789 from_kgid(&init_user_ns, gid),
7759db82 5790 audit_get_sessionid(current));
8192b0c4 5791 }
24023451 5792
b6c40d68 5793 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 5794 }
991fb3f7
ND
5795 if (notify)
5796 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 5797 return 0;
1da177e4
LT
5798}
5799
4417da66
PM
5800/**
5801 * dev_set_promiscuity - update promiscuity count on a device
5802 * @dev: device
5803 * @inc: modifier
5804 *
5805 * Add or remove promiscuity from a device. While the count in the device
5806 * remains above zero the interface remains promiscuous. Once it hits zero
5807 * the device reverts back to normal filtering operation. A negative inc
5808 * value is used to drop promiscuity on the device.
dad9b335 5809 * Return 0 if successful or a negative errno code on error.
4417da66 5810 */
dad9b335 5811int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 5812{
b536db93 5813 unsigned int old_flags = dev->flags;
dad9b335 5814 int err;
4417da66 5815
991fb3f7 5816 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 5817 if (err < 0)
dad9b335 5818 return err;
4417da66
PM
5819 if (dev->flags != old_flags)
5820 dev_set_rx_mode(dev);
dad9b335 5821 return err;
4417da66 5822}
d1b19dff 5823EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 5824
991fb3f7 5825static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 5826{
991fb3f7 5827 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 5828
24023451
PM
5829 ASSERT_RTNL();
5830
1da177e4 5831 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
5832 dev->allmulti += inc;
5833 if (dev->allmulti == 0) {
5834 /*
5835 * Avoid overflow.
5836 * If inc causes overflow, untouch allmulti and return error.
5837 */
5838 if (inc < 0)
5839 dev->flags &= ~IFF_ALLMULTI;
5840 else {
5841 dev->allmulti -= inc;
7b6cd1ce
JP
5842 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5843 dev->name);
dad9b335
WC
5844 return -EOVERFLOW;
5845 }
5846 }
24023451 5847 if (dev->flags ^ old_flags) {
b6c40d68 5848 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 5849 dev_set_rx_mode(dev);
991fb3f7
ND
5850 if (notify)
5851 __dev_notify_flags(dev, old_flags,
5852 dev->gflags ^ old_gflags);
24023451 5853 }
dad9b335 5854 return 0;
4417da66 5855}
991fb3f7
ND
5856
5857/**
5858 * dev_set_allmulti - update allmulti count on a device
5859 * @dev: device
5860 * @inc: modifier
5861 *
5862 * Add or remove reception of all multicast frames to a device. While the
5863 * count in the device remains above zero the interface remains listening
 5864 * to all multicast frames. Once it hits zero the device reverts back to normal
5865 * filtering operation. A negative @inc value is used to drop the counter
5866 * when releasing a resource needing all multicasts.
5867 * Return 0 if successful or a negative errno code on error.
5868 */
5869
5870int dev_set_allmulti(struct net_device *dev, int inc)
5871{
5872 return __dev_set_allmulti(dev, inc, true);
5873}
d1b19dff 5874EXPORT_SYMBOL(dev_set_allmulti);
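/*
 * [Editorial example - not part of dev.c] Sketch of the counted style
 * of dev_set_promiscuity()/dev_set_allmulti(): a user bumps the count
 * while it needs the mode and drops it with a negative increment when
 * done, under RTNL.  example_capture_start/stop() are illustrative
 * names for something like a packet-capture module.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* +1: enter promiscuous mode */
	if (!err) {
		err = dev_set_allmulti(dev, 1);	/* also accept all multicast */
		if (err)
			dev_set_promiscuity(dev, -1);
	}
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}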
4417da66
PM
5875
5876/*
5877 * Upload unicast and multicast address lists to device and
5878 * configure RX filtering. When the device doesn't support unicast
53ccaae1 5879 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
5880 * are present.
5881 */
5882void __dev_set_rx_mode(struct net_device *dev)
5883{
d314774c
SH
5884 const struct net_device_ops *ops = dev->netdev_ops;
5885
4417da66
PM
5886 /* dev_open will call this function so the list will stay sane. */
5887 if (!(dev->flags&IFF_UP))
5888 return;
5889
5890 if (!netif_device_present(dev))
40b77c94 5891 return;
4417da66 5892
01789349 5893 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
5894 /* Unicast addresses changes may only happen under the rtnl,
5895 * therefore calling __dev_set_promiscuity here is safe.
5896 */
32e7bfc4 5897 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 5898 __dev_set_promiscuity(dev, 1, false);
2d348d1f 5899 dev->uc_promisc = true;
32e7bfc4 5900 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 5901 __dev_set_promiscuity(dev, -1, false);
2d348d1f 5902 dev->uc_promisc = false;
4417da66 5903 }
4417da66 5904 }
01789349
JP
5905
5906 if (ops->ndo_set_rx_mode)
5907 ops->ndo_set_rx_mode(dev);
4417da66
PM
5908}
5909
5910void dev_set_rx_mode(struct net_device *dev)
5911{
b9e40857 5912 netif_addr_lock_bh(dev);
4417da66 5913 __dev_set_rx_mode(dev);
b9e40857 5914 netif_addr_unlock_bh(dev);
1da177e4
LT
5915}
5916
f0db275a
SH
5917/**
5918 * dev_get_flags - get flags reported to userspace
5919 * @dev: device
5920 *
5921 * Get the combination of flag bits exported through APIs to userspace.
5922 */
95c96174 5923unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 5924{
95c96174 5925 unsigned int flags;
1da177e4
LT
5926
5927 flags = (dev->flags & ~(IFF_PROMISC |
5928 IFF_ALLMULTI |
b00055aa
SR
5929 IFF_RUNNING |
5930 IFF_LOWER_UP |
5931 IFF_DORMANT)) |
1da177e4
LT
5932 (dev->gflags & (IFF_PROMISC |
5933 IFF_ALLMULTI));
5934
b00055aa
SR
5935 if (netif_running(dev)) {
5936 if (netif_oper_up(dev))
5937 flags |= IFF_RUNNING;
5938 if (netif_carrier_ok(dev))
5939 flags |= IFF_LOWER_UP;
5940 if (netif_dormant(dev))
5941 flags |= IFF_DORMANT;
5942 }
1da177e4
LT
5943
5944 return flags;
5945}
d1b19dff 5946EXPORT_SYMBOL(dev_get_flags);
1da177e4 5947
bd380811 5948int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 5949{
b536db93 5950 unsigned int old_flags = dev->flags;
bd380811 5951 int ret;
1da177e4 5952
24023451
PM
5953 ASSERT_RTNL();
5954
1da177e4
LT
5955 /*
5956 * Set the flags on our device.
5957 */
5958
5959 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5960 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5961 IFF_AUTOMEDIA)) |
5962 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5963 IFF_ALLMULTI));
5964
5965 /*
5966 * Load in the correct multicast list now the flags have changed.
5967 */
5968
b6c40d68
PM
5969 if ((old_flags ^ flags) & IFF_MULTICAST)
5970 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 5971
4417da66 5972 dev_set_rx_mode(dev);
1da177e4
LT
5973
5974 /*
 5975 * Have we downed the interface? We handle IFF_UP ourselves
5976 * according to user attempts to set it, rather than blindly
5977 * setting it.
5978 */
5979
5980 ret = 0;
d215d10f 5981 if ((old_flags ^ flags) & IFF_UP)
bd380811 5982 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4 5983
1da177e4 5984 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 5985 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 5986 unsigned int old_flags = dev->flags;
d1b19dff 5987
1da177e4 5988 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
5989
5990 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5991 if (dev->flags != old_flags)
5992 dev_set_rx_mode(dev);
1da177e4
LT
5993 }
5994
5995 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 5996 is important. Some (broken) drivers set IFF_PROMISC when
 5997 IFF_ALLMULTI is requested, without asking us and without reporting it.
5998 */
5999 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
6000 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6001
1da177e4 6002 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 6003 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
6004 }
6005
bd380811
PM
6006 return ret;
6007}
6008
a528c219
ND
6009void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6010 unsigned int gchanges)
bd380811
PM
6011{
6012 unsigned int changes = dev->flags ^ old_flags;
6013
a528c219 6014 if (gchanges)
7f294054 6015 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 6016
bd380811
PM
6017 if (changes & IFF_UP) {
6018 if (dev->flags & IFF_UP)
6019 call_netdevice_notifiers(NETDEV_UP, dev);
6020 else
6021 call_netdevice_notifiers(NETDEV_DOWN, dev);
6022 }
6023
6024 if (dev->flags & IFF_UP &&
be9efd36
JP
6025 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6026 struct netdev_notifier_change_info change_info;
6027
6028 change_info.flags_changed = changes;
6029 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6030 &change_info.info);
6031 }
bd380811
PM
6032}
6033
6034/**
6035 * dev_change_flags - change device settings
6036 * @dev: device
6037 * @flags: device state flags
6038 *
 6039 * Change settings on a device based on state flags. The flags are
 6040 * in the userspace-exported format.
6041 */
b536db93 6042int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 6043{
b536db93 6044 int ret;
991fb3f7 6045 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
6046
6047 ret = __dev_change_flags(dev, flags);
6048 if (ret < 0)
6049 return ret;
6050
991fb3f7 6051 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 6052 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
6053 return ret;
6054}
d1b19dff 6055EXPORT_SYMBOL(dev_change_flags);
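/*
 * [Editorial example - not part of dev.c] Sketch of reading the
 * userspace-visible flags with dev_get_flags() and bringing an
 * interface administratively up via dev_change_flags(), the same path
 * the SIOCSIFFLAGS ioctl takes.  example_bring_up() is an illustrative
 * name.
 */
static int example_bring_up(struct net_device *dev)
{
	unsigned int flags;
	int err = 0;

	rtnl_lock();
	flags = dev_get_flags(dev);
	if (!(flags & IFF_UP))
		err = dev_change_flags(dev, flags | IFF_UP);
	rtnl_unlock();
	return err;
}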
1da177e4 6056
2315dc91
VF
6057static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6058{
6059 const struct net_device_ops *ops = dev->netdev_ops;
6060
6061 if (ops->ndo_change_mtu)
6062 return ops->ndo_change_mtu(dev, new_mtu);
6063
6064 dev->mtu = new_mtu;
6065 return 0;
6066}
6067
f0db275a
SH
6068/**
6069 * dev_set_mtu - Change maximum transfer unit
6070 * @dev: device
6071 * @new_mtu: new transfer unit
6072 *
6073 * Change the maximum transfer size of the network device.
6074 */
1da177e4
LT
6075int dev_set_mtu(struct net_device *dev, int new_mtu)
6076{
2315dc91 6077 int err, orig_mtu;
1da177e4
LT
6078
6079 if (new_mtu == dev->mtu)
6080 return 0;
6081
6082 /* MTU must be positive. */
6083 if (new_mtu < 0)
6084 return -EINVAL;
6085
6086 if (!netif_device_present(dev))
6087 return -ENODEV;
6088
1d486bfb
VF
6089 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6090 err = notifier_to_errno(err);
6091 if (err)
6092 return err;
d314774c 6093
2315dc91
VF
6094 orig_mtu = dev->mtu;
6095 err = __dev_set_mtu(dev, new_mtu);
d314774c 6096
2315dc91
VF
6097 if (!err) {
6098 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6099 err = notifier_to_errno(err);
6100 if (err) {
6101 /* setting mtu back and notifying everyone again,
6102 * so that they have a chance to revert changes.
6103 */
6104 __dev_set_mtu(dev, orig_mtu);
6105 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6106 }
6107 }
1da177e4
LT
6108 return err;
6109}
d1b19dff 6110EXPORT_SYMBOL(dev_set_mtu);
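/*
 * [Editorial example - not part of dev.c] Sketch of changing the MTU
 * from kernel code with dev_set_mtu(); because of the notifier calls
 * above, a veto from any NETDEV_PRECHANGEMTU listener simply comes
 * back as a negative errno.  example_set_jumbo() and the 9000-byte
 * value are illustrative assumptions.
 */
static int example_set_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	if (err)
		netdev_warn(dev, "MTU change rejected: %d\n", err);
	return err;
}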
1da177e4 6111
cbda10fa
VD
6112/**
6113 * dev_set_group - Change group this device belongs to
6114 * @dev: device
6115 * @new_group: group this device should belong to
6116 */
6117void dev_set_group(struct net_device *dev, int new_group)
6118{
6119 dev->group = new_group;
6120}
6121EXPORT_SYMBOL(dev_set_group);
6122
f0db275a
SH
6123/**
6124 * dev_set_mac_address - Change Media Access Control Address
6125 * @dev: device
6126 * @sa: new address
6127 *
6128 * Change the hardware (MAC) address of the device
6129 */
1da177e4
LT
6130int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6131{
d314774c 6132 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
6133 int err;
6134
d314774c 6135 if (!ops->ndo_set_mac_address)
1da177e4
LT
6136 return -EOPNOTSUPP;
6137 if (sa->sa_family != dev->type)
6138 return -EINVAL;
6139 if (!netif_device_present(dev))
6140 return -ENODEV;
d314774c 6141 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
6142 if (err)
6143 return err;
fbdeca2d 6144 dev->addr_assign_type = NET_ADDR_SET;
f6521516 6145 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 6146 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 6147 return 0;
1da177e4 6148}
d1b19dff 6149EXPORT_SYMBOL(dev_set_mac_address);
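/*
 * [Editorial example - not part of dev.c] Sketch of setting a MAC
 * address from kernel code: the sockaddr's sa_family must match
 * dev->type or dev_set_mac_address() returns -EINVAL.  The locally
 * administered address below is a placeholder; ETH_ALEN comes from
 * <linux/if_ether.h>.
 */
static int example_set_mac(struct net_device *dev)
{
	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, addr, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}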
1da177e4 6150
4bf84c35
JP
6151/**
6152 * dev_change_carrier - Change device carrier
6153 * @dev: device
691b3b7e 6154 * @new_carrier: new value
4bf84c35
JP
6155 *
6156 * Change device carrier
6157 */
6158int dev_change_carrier(struct net_device *dev, bool new_carrier)
6159{
6160 const struct net_device_ops *ops = dev->netdev_ops;
6161
6162 if (!ops->ndo_change_carrier)
6163 return -EOPNOTSUPP;
6164 if (!netif_device_present(dev))
6165 return -ENODEV;
6166 return ops->ndo_change_carrier(dev, new_carrier);
6167}
6168EXPORT_SYMBOL(dev_change_carrier);
6169
66b52b0d
JP
6170/**
6171 * dev_get_phys_port_id - Get device physical port ID
6172 * @dev: device
6173 * @ppid: port ID
6174 *
6175 * Get device physical port ID
6176 */
6177int dev_get_phys_port_id(struct net_device *dev,
02637fce 6178 struct netdev_phys_item_id *ppid)
66b52b0d
JP
6179{
6180 const struct net_device_ops *ops = dev->netdev_ops;
6181
6182 if (!ops->ndo_get_phys_port_id)
6183 return -EOPNOTSUPP;
6184 return ops->ndo_get_phys_port_id(dev, ppid);
6185}
6186EXPORT_SYMBOL(dev_get_phys_port_id);
6187
db24a904
DA
6188/**
6189 * dev_get_phys_port_name - Get device physical port name
6190 * @dev: device
6191 * @name: port name
6192 *
6193 * Get device physical port name
6194 */
6195int dev_get_phys_port_name(struct net_device *dev,
6196 char *name, size_t len)
6197{
6198 const struct net_device_ops *ops = dev->netdev_ops;
6199
6200 if (!ops->ndo_get_phys_port_name)
6201 return -EOPNOTSUPP;
6202 return ops->ndo_get_phys_port_name(dev, name, len);
6203}
6204EXPORT_SYMBOL(dev_get_phys_port_name);
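/*
 * [Editorial example - not part of dev.c] Sketch of querying the
 * physical-port identity that switch drivers may expose; both helpers
 * return -EOPNOTSUPP when the driver lacks the hook, which callers
 * normally treat as "no information" rather than a hard error.
 * example_show_phys_port() is an illustrative name.
 */
static void example_show_phys_port(struct net_device *dev)
{
	struct netdev_phys_item_id ppid;
	char name[IFNAMSIZ];

	if (!dev_get_phys_port_id(dev, &ppid))
		netdev_info(dev, "phys port id of %u bytes\n", ppid.id_len);

	if (!dev_get_phys_port_name(dev, name, sizeof(name)))
		netdev_info(dev, "phys port name %s\n", name);
}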
6205
d746d707
AK
6206/**
6207 * dev_change_proto_down - update protocol port state information
6208 * @dev: device
6209 * @proto_down: new value
6210 *
6211 * This info can be used by switch drivers to set the phys state of the
6212 * port.
6213 */
6214int dev_change_proto_down(struct net_device *dev, bool proto_down)
6215{
6216 const struct net_device_ops *ops = dev->netdev_ops;
6217
6218 if (!ops->ndo_change_proto_down)
6219 return -EOPNOTSUPP;
6220 if (!netif_device_present(dev))
6221 return -ENODEV;
6222 return ops->ndo_change_proto_down(dev, proto_down);
6223}
6224EXPORT_SYMBOL(dev_change_proto_down);
6225
1da177e4
LT
6226/**
6227 * dev_new_index - allocate an ifindex
c4ea43c5 6228 * @net: the applicable net namespace
1da177e4
LT
6229 *
6230 * Returns a suitable unique value for a new device interface
6231 * number. The caller must hold the rtnl semaphore or the
6232 * dev_base_lock to be sure it remains unique.
6233 */
881d966b 6234static int dev_new_index(struct net *net)
1da177e4 6235{
aa79e66e 6236 int ifindex = net->ifindex;
1da177e4
LT
6237 for (;;) {
6238 if (++ifindex <= 0)
6239 ifindex = 1;
881d966b 6240 if (!__dev_get_by_index(net, ifindex))
aa79e66e 6241 return net->ifindex = ifindex;
1da177e4
LT
6242 }
6243}
6244
1da177e4 6245/* Delayed registration/unregisteration */
3b5b34fd 6246static LIST_HEAD(net_todo_list);
200b916f 6247DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 6248
6f05f629 6249static void net_set_todo(struct net_device *dev)
1da177e4 6250{
1da177e4 6251 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 6252 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
6253}
6254
9b5e383c 6255static void rollback_registered_many(struct list_head *head)
93ee31f1 6256{
e93737b0 6257 struct net_device *dev, *tmp;
5cde2829 6258 LIST_HEAD(close_head);
9b5e383c 6259
93ee31f1
DL
6260 BUG_ON(dev_boot_phase);
6261 ASSERT_RTNL();
6262
e93737b0 6263 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 6264 /* Some devices call this without ever having registered, as
e93737b0
KK
 6265 * part of unwinding a failed initialization. Remove those
6266 * devices and proceed with the remaining.
9b5e383c
ED
6267 */
6268 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
6269 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6270 dev->name, dev);
93ee31f1 6271
9b5e383c 6272 WARN_ON(1);
e93737b0
KK
6273 list_del(&dev->unreg_list);
6274 continue;
9b5e383c 6275 }
449f4544 6276 dev->dismantle = true;
9b5e383c 6277 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 6278 }
93ee31f1 6279
44345724 6280 /* If device is running, close it first. */
5cde2829
EB
6281 list_for_each_entry(dev, head, unreg_list)
6282 list_add_tail(&dev->close_list, &close_head);
99c4a26a 6283 dev_close_many(&close_head, true);
93ee31f1 6284
44345724 6285 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
6286 /* And unlink it from device chain. */
6287 unlist_netdevice(dev);
93ee31f1 6288
9b5e383c 6289 dev->reg_state = NETREG_UNREGISTERING;
e9e4dd32 6290 on_each_cpu(flush_backlog, dev, 1);
9b5e383c 6291 }
93ee31f1
DL
6292
6293 synchronize_net();
6294
9b5e383c 6295 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
6296 struct sk_buff *skb = NULL;
6297
9b5e383c
ED
6298 /* Shutdown queueing discipline. */
6299 dev_shutdown(dev);
93ee31f1
DL
6300
6301
9b5e383c
ED
6302 /* Notify protocols, that we are about to destroy
6303 this device. They should clean all the things.
6304 */
6305 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 6306
395eea6c
MB
6307 if (!dev->rtnl_link_ops ||
6308 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6309 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6310 GFP_KERNEL);
6311
9b5e383c
ED
6312 /*
6313 * Flush the unicast and multicast chains
6314 */
a748ee24 6315 dev_uc_flush(dev);
22bedad3 6316 dev_mc_flush(dev);
93ee31f1 6317
9b5e383c
ED
6318 if (dev->netdev_ops->ndo_uninit)
6319 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 6320
395eea6c
MB
6321 if (skb)
6322 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 6323
9ff162a8
JP
6324 /* Notifier chain MUST detach us all upper devices. */
6325 WARN_ON(netdev_has_any_upper_dev(dev));
93ee31f1 6326
9b5e383c
ED
6327 /* Remove entries from kobject tree */
6328 netdev_unregister_kobject(dev);
024e9679
AD
6329#ifdef CONFIG_XPS
6330 /* Remove XPS queueing entries */
6331 netif_reset_xps_queues_gt(dev, 0);
6332#endif
9b5e383c 6333 }
93ee31f1 6334
850a545b 6335 synchronize_net();
395264d5 6336
a5ee1551 6337 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
6338 dev_put(dev);
6339}
6340
6341static void rollback_registered(struct net_device *dev)
6342{
6343 LIST_HEAD(single);
6344
6345 list_add(&dev->unreg_list, &single);
6346 rollback_registered_many(&single);
ceaaec98 6347 list_del(&single);
93ee31f1
DL
6348}
6349
fd867d51
JW
6350static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6351 struct net_device *upper, netdev_features_t features)
6352{
6353 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6354 netdev_features_t feature;
5ba3f7d6 6355 int feature_bit;
fd867d51 6356
5ba3f7d6
JW
6357 for_each_netdev_feature(&upper_disables, feature_bit) {
6358 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
6359 if (!(upper->wanted_features & feature)
6360 && (features & feature)) {
6361 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6362 &feature, upper->name);
6363 features &= ~feature;
6364 }
6365 }
6366
6367 return features;
6368}
6369
6370static void netdev_sync_lower_features(struct net_device *upper,
6371 struct net_device *lower, netdev_features_t features)
6372{
6373 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6374 netdev_features_t feature;
5ba3f7d6 6375 int feature_bit;
fd867d51 6376
5ba3f7d6
JW
6377 for_each_netdev_feature(&upper_disables, feature_bit) {
6378 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
6379 if (!(features & feature) && (lower->features & feature)) {
6380 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6381 &feature, lower->name);
6382 lower->wanted_features &= ~feature;
6383 netdev_update_features(lower);
6384
6385 if (unlikely(lower->features & feature))
6386 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6387 &feature, lower->name);
6388 }
6389 }
6390}
6391
c8f44aff
MM
6392static netdev_features_t netdev_fix_features(struct net_device *dev,
6393 netdev_features_t features)
b63365a2 6394{
57422dc5
MM
6395 /* Fix illegal checksum combinations */
6396 if ((features & NETIF_F_HW_CSUM) &&
6397 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 6398 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
6399 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6400 }
6401
b63365a2 6402 /* TSO requires that SG is present as well. */
ea2d3688 6403 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 6404 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 6405 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
6406 }
6407
ec5f0615
PS
6408 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6409 !(features & NETIF_F_IP_CSUM)) {
6410 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6411 features &= ~NETIF_F_TSO;
6412 features &= ~NETIF_F_TSO_ECN;
6413 }
6414
6415 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6416 !(features & NETIF_F_IPV6_CSUM)) {
6417 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6418 features &= ~NETIF_F_TSO6;
6419 }
6420
31d8b9e0
BH
6421 /* TSO ECN requires that TSO is present as well. */
6422 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6423 features &= ~NETIF_F_TSO_ECN;
6424
212b573f
MM
6425 /* Software GSO depends on SG. */
6426 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 6427 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
6428 features &= ~NETIF_F_GSO;
6429 }
6430
acd1130e 6431 /* UFO needs SG and checksumming */
b63365a2 6432 if (features & NETIF_F_UFO) {
79032644
MM
6433 /* maybe split UFO into V4 and V6? */
6434 if (!((features & NETIF_F_GEN_CSUM) ||
6435 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6436 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 6437 netdev_dbg(dev,
acd1130e 6438 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
6439 features &= ~NETIF_F_UFO;
6440 }
6441
6442 if (!(features & NETIF_F_SG)) {
6f404e44 6443 netdev_dbg(dev,
acd1130e 6444 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
6445 features &= ~NETIF_F_UFO;
6446 }
6447 }
6448
d0290214
JP
6449#ifdef CONFIG_NET_RX_BUSY_POLL
6450 if (dev->netdev_ops->ndo_busy_poll)
6451 features |= NETIF_F_BUSY_POLL;
6452 else
6453#endif
6454 features &= ~NETIF_F_BUSY_POLL;
6455
b63365a2
HX
6456 return features;
6457}
b63365a2 6458
6cb6a27c 6459int __netdev_update_features(struct net_device *dev)
5455c699 6460{
fd867d51 6461 struct net_device *upper, *lower;
c8f44aff 6462 netdev_features_t features;
fd867d51 6463 struct list_head *iter;
e7868a85 6464 int err = -1;
5455c699 6465
87267485
MM
6466 ASSERT_RTNL();
6467
5455c699
MM
6468 features = netdev_get_wanted_features(dev);
6469
6470 if (dev->netdev_ops->ndo_fix_features)
6471 features = dev->netdev_ops->ndo_fix_features(dev, features);
6472
6473 /* driver might be less strict about feature dependencies */
6474 features = netdev_fix_features(dev, features);
6475
fd867d51
JW
 6476 /* some features can't be enabled if they're off on an upper device */
6477 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6478 features = netdev_sync_upper_features(dev, upper, features);
6479
5455c699 6480 if (dev->features == features)
e7868a85 6481 goto sync_lower;
5455c699 6482
c8f44aff
MM
6483 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6484 &dev->features, &features);
5455c699
MM
6485
6486 if (dev->netdev_ops->ndo_set_features)
6487 err = dev->netdev_ops->ndo_set_features(dev, features);
5f8dc33e
NA
6488 else
6489 err = 0;
5455c699 6490
6cb6a27c 6491 if (unlikely(err < 0)) {
5455c699 6492 netdev_err(dev,
c8f44aff
MM
6493 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6494 err, &features, &dev->features);
17b85d29
NA
6495 /* return non-0 since some features might have changed and
6496 * it's better to fire a spurious notification than miss it
6497 */
6498 return -1;
6cb6a27c
MM
6499 }
6500
e7868a85 6501sync_lower:
fd867d51
JW
6502 /* some features must be disabled on lower devices when disabled
6503 * on an upper device (think: bonding master or bridge)
6504 */
6505 netdev_for_each_lower_dev(dev, lower, iter)
6506 netdev_sync_lower_features(dev, lower, features);
6507
6cb6a27c
MM
6508 if (!err)
6509 dev->features = features;
6510
e7868a85 6511 return err < 0 ? 0 : 1;
6cb6a27c
MM
6512}
6513
afe12cc8
MM
6514/**
6515 * netdev_update_features - recalculate device features
6516 * @dev: the device to check
6517 *
6518 * Recalculate dev->features set and send notifications if it
6519 * has changed. Should be called after driver or hardware dependent
6520 * conditions might have changed that influence the features.
6521 */
6cb6a27c
MM
6522void netdev_update_features(struct net_device *dev)
6523{
6524 if (__netdev_update_features(dev))
6525 netdev_features_change(dev);
5455c699
MM
6526}
6527EXPORT_SYMBOL(netdev_update_features);
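/*
 * [Editorial example - not part of dev.c] Sketch of the pattern around
 * netdev_update_features(): after the hardware's capabilities change,
 * a driver adjusts dev->hw_features (or the state its ndo_fix_features
 * callback consults) and lets the core recompute dev->features and
 * send the notification.  example_tso_changed() is an illustrative
 * name, not a real driver hook.
 */
static void example_tso_changed(struct net_device *dev, bool tso_usable)
{
	ASSERT_RTNL();

	if (tso_usable)
		dev->hw_features |= NETIF_F_TSO;
	else
		dev->hw_features &= ~NETIF_F_TSO;

	/* recomputes dev->features and calls netdev_features_change() if needed */
	netdev_update_features(dev);
}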
6528
afe12cc8
MM
6529/**
6530 * netdev_change_features - recalculate device features
6531 * @dev: the device to check
6532 *
6533 * Recalculate dev->features set and send notifications even
6534 * if they have not changed. Should be called instead of
6535 * netdev_update_features() if also dev->vlan_features might
6536 * have changed to allow the changes to be propagated to stacked
6537 * VLAN devices.
6538 */
6539void netdev_change_features(struct net_device *dev)
6540{
6541 __netdev_update_features(dev);
6542 netdev_features_change(dev);
6543}
6544EXPORT_SYMBOL(netdev_change_features);
6545
fc4a7489
PM
6546/**
6547 * netif_stacked_transfer_operstate - transfer operstate
6548 * @rootdev: the root or lower level device to transfer state from
6549 * @dev: the device to transfer operstate to
6550 *
6551 * Transfer operational state from root to device. This is normally
6552 * called when a stacking relationship exists between the root
 6553 * device and the device (a leaf device).
6554 */
6555void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6556 struct net_device *dev)
6557{
6558 if (rootdev->operstate == IF_OPER_DORMANT)
6559 netif_dormant_on(dev);
6560 else
6561 netif_dormant_off(dev);
6562
6563 if (netif_carrier_ok(rootdev)) {
6564 if (!netif_carrier_ok(dev))
6565 netif_carrier_on(dev);
6566 } else {
6567 if (netif_carrier_ok(dev))
6568 netif_carrier_off(dev);
6569 }
6570}
6571EXPORT_SYMBOL(netif_stacked_transfer_operstate);
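/*
 * [Editorial example - not part of dev.c] Sketch of a stacking driver
 * mirroring its lower device's state, roughly what VLAN/macvlan do: on
 * a NETDEV_CHANGE event for the lower device, dormant and carrier
 * state are transferred to the virtual device on top.
 * example_lower_state_changed() is an illustrative name.
 */
static void example_lower_state_changed(struct net_device *lower,
					struct net_device *upper)
{
	/* typically invoked from a netdevice notifier for @lower */
	netif_stacked_transfer_operstate(lower, upper);
}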
6572
a953be53 6573#ifdef CONFIG_SYSFS
1b4bf461
ED
6574static int netif_alloc_rx_queues(struct net_device *dev)
6575{
1b4bf461 6576 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 6577 struct netdev_rx_queue *rx;
10595902 6578 size_t sz = count * sizeof(*rx);
1b4bf461 6579
bd25fa7b 6580 BUG_ON(count < 1);
1b4bf461 6581
10595902
PG
6582 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6583 if (!rx) {
6584 rx = vzalloc(sz);
6585 if (!rx)
6586 return -ENOMEM;
6587 }
bd25fa7b
TH
6588 dev->_rx = rx;
6589
bd25fa7b 6590 for (i = 0; i < count; i++)
fe822240 6591 rx[i].dev = dev;
1b4bf461
ED
6592 return 0;
6593}
bf264145 6594#endif
1b4bf461 6595
aa942104
CG
6596static void netdev_init_one_queue(struct net_device *dev,
6597 struct netdev_queue *queue, void *_unused)
6598{
6599 /* Initialize queue lock */
6600 spin_lock_init(&queue->_xmit_lock);
6601 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6602 queue->xmit_lock_owner = -1;
b236da69 6603 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 6604 queue->dev = dev;
114cf580
TH
6605#ifdef CONFIG_BQL
6606 dql_init(&queue->dql, HZ);
6607#endif
aa942104
CG
6608}
6609
60877a32
ED
6610static void netif_free_tx_queues(struct net_device *dev)
6611{
4cb28970 6612 kvfree(dev->_tx);
60877a32
ED
6613}
6614
e6484930
TH
6615static int netif_alloc_netdev_queues(struct net_device *dev)
6616{
6617 unsigned int count = dev->num_tx_queues;
6618 struct netdev_queue *tx;
60877a32 6619 size_t sz = count * sizeof(*tx);
e6484930 6620
d339727c
ED
6621 if (count < 1 || count > 0xffff)
6622 return -EINVAL;
62b5942a 6623
60877a32
ED
6624 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6625 if (!tx) {
6626 tx = vzalloc(sz);
6627 if (!tx)
6628 return -ENOMEM;
6629 }
e6484930 6630 dev->_tx = tx;
1d24eb48 6631
e6484930
TH
6632 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6633 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
6634
6635 return 0;
e6484930
TH
6636}
6637
a2029240
DV
6638void netif_tx_stop_all_queues(struct net_device *dev)
6639{
6640 unsigned int i;
6641
6642 for (i = 0; i < dev->num_tx_queues; i++) {
6643 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6644 netif_tx_stop_queue(txq);
6645 }
6646}
6647EXPORT_SYMBOL(netif_tx_stop_all_queues);
6648
1da177e4
LT
6649/**
6650 * register_netdevice - register a network device
6651 * @dev: device to register
6652 *
6653 * Take a completed network device structure and add it to the kernel
6654 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6655 * chain. 0 is returned on success. A negative errno code is returned
6656 * on a failure to set up the device, or if the name is a duplicate.
6657 *
6658 * Callers must hold the rtnl semaphore. You may want
6659 * register_netdev() instead of this.
6660 *
6661 * BUGS:
6662 * The locking appears insufficient to guarantee two parallel registers
6663 * will not get the same name.
6664 */
6665
6666int register_netdevice(struct net_device *dev)
6667{
1da177e4 6668 int ret;
d314774c 6669 struct net *net = dev_net(dev);
1da177e4
LT
6670
6671 BUG_ON(dev_boot_phase);
6672 ASSERT_RTNL();
6673
b17a7c17
SH
6674 might_sleep();
6675
1da177e4
LT
6676 /* When net_device's are persistent, this will be fatal. */
6677 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 6678 BUG_ON(!net);
1da177e4 6679
f1f28aa3 6680 spin_lock_init(&dev->addr_list_lock);
cf508b12 6681 netdev_set_addr_lockdep_class(dev);
1da177e4 6682
828de4f6 6683 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
6684 if (ret < 0)
6685 goto out;
6686
1da177e4 6687 /* Init, if this function is available */
d314774c
SH
6688 if (dev->netdev_ops->ndo_init) {
6689 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
6690 if (ret) {
6691 if (ret > 0)
6692 ret = -EIO;
90833aa4 6693 goto out;
1da177e4
LT
6694 }
6695 }
4ec93edb 6696
f646968f
PM
6697 if (((dev->hw_features | dev->features) &
6698 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
6699 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6700 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6701 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6702 ret = -EINVAL;
6703 goto err_uninit;
6704 }
6705
9c7dafbf
PE
6706 ret = -EBUSY;
6707 if (!dev->ifindex)
6708 dev->ifindex = dev_new_index(net);
6709 else if (__dev_get_by_index(net, dev->ifindex))
6710 goto err_uninit;
6711
5455c699
MM
6712 /* Transfer changeable features to wanted_features and enable
6713 * software offloads (GSO and GRO).
6714 */
6715 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f
MM
6716 dev->features |= NETIF_F_SOFT_FEATURES;
6717 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 6718
34324dc2
MM
6719 if (!(dev->flags & IFF_LOOPBACK)) {
6720 dev->hw_features |= NETIF_F_NOCACHE_COPY;
c6e1a0d1
TH
6721 }
6722
1180e7d6 6723 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 6724 */
1180e7d6 6725 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 6726
ee579677
PS
6727 /* Make NETIF_F_SG inheritable to tunnel devices.
6728 */
6729 dev->hw_enc_features |= NETIF_F_SG;
6730
0d89d203
SH
6731 /* Make NETIF_F_SG inheritable to MPLS.
6732 */
6733 dev->mpls_features |= NETIF_F_SG;
6734
7ffbe3fd
JB
6735 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6736 ret = notifier_to_errno(ret);
6737 if (ret)
6738 goto err_uninit;
6739
8b41d188 6740 ret = netdev_register_kobject(dev);
b17a7c17 6741 if (ret)
7ce1b0ed 6742 goto err_uninit;
b17a7c17
SH
6743 dev->reg_state = NETREG_REGISTERED;
6744
6cb6a27c 6745 __netdev_update_features(dev);
8e9b59b2 6746
1da177e4
LT
6747 /*
6748 * Default initial state at registry is that the
6749 * device is present.
6750 */
6751
6752 set_bit(__LINK_STATE_PRESENT, &dev->state);
6753
8f4cccbb
BH
6754 linkwatch_init_dev(dev);
6755
1da177e4 6756 dev_init_scheduler(dev);
1da177e4 6757 dev_hold(dev);
ce286d32 6758 list_netdevice(dev);
7bf23575 6759 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 6760
948b337e
JP
6761 /* If the device has permanent device address, driver should
6762 * set dev_addr and also addr_assign_type should be set to
6763 * NET_ADDR_PERM (default value).
6764 */
6765 if (dev->addr_assign_type == NET_ADDR_PERM)
6766 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6767
1da177e4 6768 /* Notify protocols, that a new device appeared. */
056925ab 6769 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 6770 ret = notifier_to_errno(ret);
93ee31f1
DL
6771 if (ret) {
6772 rollback_registered(dev);
6773 dev->reg_state = NETREG_UNREGISTERED;
6774 }
d90a909e
EB
6775 /*
6776 * Prevent userspace races by waiting until the network
6777 * device is fully setup before sending notifications.
6778 */
a2835763
PM
6779 if (!dev->rtnl_link_ops ||
6780 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 6781 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
6782
6783out:
6784 return ret;
7ce1b0ed
HX
6785
6786err_uninit:
d314774c
SH
6787 if (dev->netdev_ops->ndo_uninit)
6788 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 6789 goto out;
1da177e4 6790}
d1b19dff 6791EXPORT_SYMBOL(register_netdevice);
1da177e4 6792
937f1ba5
BH
6793/**
6794 * init_dummy_netdev - init a dummy network device for NAPI
6795 * @dev: device to init
6796 *
 6797 * This takes a network device structure and initializes the minimum
 6798 * number of fields so it can be used to schedule NAPI polls without
6799 * registering a full blown interface. This is to be used by drivers
6800 * that need to tie several hardware interfaces to a single NAPI
6801 * poll scheduler due to HW limitations.
6802 */
6803int init_dummy_netdev(struct net_device *dev)
6804{
6805 /* Clear everything. Note we don't initialize spinlocks
 6806 * as they aren't supposed to be taken by any of the
6807 * NAPI code and this dummy netdev is supposed to be
6808 * only ever used for NAPI polls
6809 */
6810 memset(dev, 0, sizeof(struct net_device));
6811
6812 /* make sure we BUG if trying to hit standard
6813 * register/unregister code path
6814 */
6815 dev->reg_state = NETREG_DUMMY;
6816
937f1ba5
BH
6817 /* NAPI wants this */
6818 INIT_LIST_HEAD(&dev->napi_list);
6819
6820 /* a dummy interface is started by default */
6821 set_bit(__LINK_STATE_PRESENT, &dev->state);
6822 set_bit(__LINK_STATE_START, &dev->state);
6823
29b4433d
ED
 6824 /* Note: We don't allocate pcpu_refcnt for dummy devices,
 6825 * because users of this 'device' don't need to change
6826 * its refcount.
6827 */
6828
937f1ba5
BH
6829 return 0;
6830}
6831EXPORT_SYMBOL_GPL(init_dummy_netdev);
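/*
 * [Editorial example - not part of dev.c] Sketch of the use case the
 * kernel-doc above describes: hardware with no natural net_device of
 * its own still needs one as an anchor for NAPI.  struct example_hw
 * and example_poll() are illustrative assumptions; the dummy device is
 * never registered and never freed through the netdev path.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy, NAPI anchor only */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget); /* assumed elsewhere */

static void example_hw_init_napi(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, NAPI_POLL_WEIGHT);
	napi_enable(&hw->napi);
}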
6832
6833
1da177e4
LT
6834/**
6835 * register_netdev - register a network device
6836 * @dev: device to register
6837 *
6838 * Take a completed network device structure and add it to the kernel
6839 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6840 * chain. 0 is returned on success. A negative errno code is returned
6841 * on a failure to set up the device, or if the name is a duplicate.
6842 *
38b4da38 6843 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
6844 * and expands the device name if you passed a format string to
6845 * alloc_netdev.
6846 */
6847int register_netdev(struct net_device *dev)
6848{
6849 int err;
6850
6851 rtnl_lock();
1da177e4 6852 err = register_netdevice(dev);
1da177e4
LT
6853 rtnl_unlock();
6854 return err;
6855}
6856EXPORT_SYMBOL(register_netdev);
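/*
 * [Editorial example - not part of dev.c] Sketch of the classic probe
 * sequence around register_netdev(): allocate with alloc_etherdev(),
 * fill in the ops table, register, and free with free_netdev() on
 * failure.  struct example_priv and example_netdev_ops are
 * illustrative assumptions (a real driver fills the ops with its
 * ndo_open/ndo_start_xmit/... hooks).
 */
struct example_priv {
	int placeholder;
};

static const struct net_device_ops example_netdev_ops; /* would carry ndo_* hooks */

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);	/* takes rtnl_lock() internally */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}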
6857
29b4433d
ED
6858int netdev_refcnt_read(const struct net_device *dev)
6859{
6860 int i, refcnt = 0;
6861
6862 for_each_possible_cpu(i)
6863 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6864 return refcnt;
6865}
6866EXPORT_SYMBOL(netdev_refcnt_read);
6867
2c53040f 6868/**
1da177e4 6869 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 6870 * @dev: target net_device
1da177e4
LT
6871 *
6872 * This is called when unregistering network devices.
6873 *
6874 * Any protocol or device that holds a reference should register
6875 * for netdevice notification, and cleanup and put back the
6876 * reference if they receive an UNREGISTER event.
6877 * We can get stuck here if buggy protocols don't correctly
4ec93edb 6878 * call dev_put.
1da177e4
LT
6879 */
6880static void netdev_wait_allrefs(struct net_device *dev)
6881{
6882 unsigned long rebroadcast_time, warning_time;
29b4433d 6883 int refcnt;
1da177e4 6884
e014debe
ED
6885 linkwatch_forget_dev(dev);
6886
1da177e4 6887 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
6888 refcnt = netdev_refcnt_read(dev);
6889
6890 while (refcnt != 0) {
1da177e4 6891 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 6892 rtnl_lock();
1da177e4
LT
6893
6894 /* Rebroadcast unregister notification */
056925ab 6895 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 6896
748e2d93 6897 __rtnl_unlock();
0115e8e3 6898 rcu_barrier();
748e2d93
ED
6899 rtnl_lock();
6900
0115e8e3 6901 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
6902 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6903 &dev->state)) {
6904 /* We must not have linkwatch events
6905 * pending on unregister. If this
6906 * happens, we simply run the queue
6907 * unscheduled, resulting in a noop
6908 * for this device.
6909 */
6910 linkwatch_run_queue();
6911 }
6912
6756ae4b 6913 __rtnl_unlock();
1da177e4
LT
6914
6915 rebroadcast_time = jiffies;
6916 }
6917
6918 msleep(250);
6919
29b4433d
ED
6920 refcnt = netdev_refcnt_read(dev);
6921
1da177e4 6922 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
6923 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6924 dev->name, refcnt);
1da177e4
LT
6925 warning_time = jiffies;
6926 }
6927 }
6928}
6929
6930/* The sequence is:
6931 *
6932 * rtnl_lock();
6933 * ...
6934 * register_netdevice(x1);
6935 * register_netdevice(x2);
6936 * ...
6937 * unregister_netdevice(y1);
6938 * unregister_netdevice(y2);
6939 * ...
6940 * rtnl_unlock();
6941 * free_netdev(y1);
6942 * free_netdev(y2);
6943 *
58ec3b4d 6944 * We are invoked by rtnl_unlock().
1da177e4 6945 * This allows us to deal with problems:
b17a7c17 6946 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
6947 * without deadlocking with linkwatch via keventd.
6948 * 2) Since we run with the RTNL semaphore not held, we can sleep
6949 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
6950 *
6951 * We must not return until all unregister events added during
6952 * the interval the lock was held have been completed.
1da177e4 6953 */
1da177e4
LT
6954void netdev_run_todo(void)
6955{
626ab0e6 6956 struct list_head list;
1da177e4 6957
1da177e4 6958 /* Snapshot list, allow later requests */
626ab0e6 6959 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
6960
6961 __rtnl_unlock();
626ab0e6 6962
0115e8e3
ED
6963
6964 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
6965 if (!list_empty(&list))
6966 rcu_barrier();
6967
1da177e4
LT
6968 while (!list_empty(&list)) {
6969 struct net_device *dev
e5e26d75 6970 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
6971 list_del(&dev->todo_list);
6972
748e2d93 6973 rtnl_lock();
0115e8e3 6974 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 6975 __rtnl_unlock();
0115e8e3 6976
b17a7c17 6977 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 6978 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
6979 dev->name, dev->reg_state);
6980 dump_stack();
6981 continue;
6982 }
1da177e4 6983
b17a7c17 6984 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 6985
b17a7c17 6986 netdev_wait_allrefs(dev);
1da177e4 6987
b17a7c17 6988 /* paranoia */
29b4433d 6989 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
6990 BUG_ON(!list_empty(&dev->ptype_all));
6991 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
6992 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6993 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 6994 WARN_ON(dev->dn_ptr);
1da177e4 6995
b17a7c17
SH
6996 if (dev->destructor)
6997 dev->destructor(dev);
9093bbb2 6998
50624c93
EB
6999 /* Report a network device has been unregistered */
7000 rtnl_lock();
7001 dev_net(dev)->dev_unreg_count--;
7002 __rtnl_unlock();
7003 wake_up(&netdev_unregistering_wq);
7004
9093bbb2
SH
7005 /* Free network device */
7006 kobject_put(&dev->dev.kobj);
1da177e4 7007 }
1da177e4
LT
7008}
7009
3cfde79c
BH
7010/* Convert net_device_stats to rtnl_link_stats64. They have the same
7011 * fields in the same order, with only the type differing.
7012 */
77a1abf5
ED
7013void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7014 const struct net_device_stats *netdev_stats)
3cfde79c
BH
7015{
7016#if BITS_PER_LONG == 64
77a1abf5
ED
7017 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
7018 memcpy(stats64, netdev_stats, sizeof(*stats64));
3cfde79c
BH
7019#else
7020 size_t i, n = sizeof(*stats64) / sizeof(u64);
7021 const unsigned long *src = (const unsigned long *)netdev_stats;
7022 u64 *dst = (u64 *)stats64;
7023
7024 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
7025 sizeof(*stats64) / sizeof(u64));
7026 for (i = 0; i < n; i++)
7027 dst[i] = src[i];
7028#endif
7029}
77a1abf5 7030EXPORT_SYMBOL(netdev_stats_to_stats64);
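/*
 * Illustrative sketch, not part of dev.c: a driver can use
 * netdev_stats_to_stats64() inside its own ndo_get_stats64 hook to widen
 * the classic dev->stats counters and then add its private 64-bit counters
 * on top. "struct example_stats_priv" and its field are hypothetical; the
 * pointer-returning prototype matches the ndo_get_stats64 signature of
 * this kernel generation.
 */
struct example_stats_priv {
	u64 rx_fifo_overflows;
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct example_stats_priv *priv = netdev_priv(dev);

	netdev_stats_to_stats64(stats, &dev->stats);
	stats->rx_fifo_errors += priv->rx_fifo_overflows;
	return stats;
}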
3cfde79c 7031
eeda3fd6
SH
7032/**
7033 * dev_get_stats - get network device statistics
7034 * @dev: device to get statistics from
28172739 7035 * @storage: place to store stats
eeda3fd6 7036 *
d7753516
BH
7037 * Get network statistics from device. Return @storage.
7038 * The device driver may provide its own method by setting
7039 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
7040 * otherwise the internal statistics structure is used.
eeda3fd6 7041 */
d7753516
BH
7042struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7043 struct rtnl_link_stats64 *storage)
7004bf25 7044{
eeda3fd6
SH
7045 const struct net_device_ops *ops = dev->netdev_ops;
7046
28172739
ED
7047 if (ops->ndo_get_stats64) {
7048 memset(storage, 0, sizeof(*storage));
caf586e5
ED
7049 ops->ndo_get_stats64(dev, storage);
7050 } else if (ops->ndo_get_stats) {
3cfde79c 7051 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
7052 } else {
7053 netdev_stats_to_stats64(storage, &dev->stats);
28172739 7054 }
caf586e5 7055 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
015f0688 7056 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
28172739 7057 return storage;
c45d286e 7058}
eeda3fd6 7059EXPORT_SYMBOL(dev_get_stats);
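/*
 * Illustrative sketch, not part of dev.c: the usual caller-side pattern for
 * dev_get_stats(), as used by the sysfs and rtnetlink readers: supply
 * temporary storage and use the returned pointer. The reporting function
 * itself is hypothetical.
 */
static void example_report_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	netdev_info(dev, "rx %llu tx %llu packets, %llu rx dropped\n",
		    stats->rx_packets, stats->tx_packets, stats->rx_dropped);
}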
c45d286e 7060
24824a09 7061struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 7062{
24824a09 7063 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 7064
24824a09
ED
7065#ifdef CONFIG_NET_CLS_ACT
7066 if (queue)
7067 return queue;
7068 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7069 if (!queue)
7070 return NULL;
7071 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 7072 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
7073 queue->qdisc_sleeping = &noop_qdisc;
7074 rcu_assign_pointer(dev->ingress_queue, queue);
7075#endif
7076 return queue;
bb949fbd
DM
7077}
7078
2c60db03
ED
7079static const struct ethtool_ops default_ethtool_ops;
7080
d07d7507
SG
7081void netdev_set_default_ethtool_ops(struct net_device *dev,
7082 const struct ethtool_ops *ops)
7083{
7084 if (dev->ethtool_ops == &default_ethtool_ops)
7085 dev->ethtool_ops = ops;
7086}
7087EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
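/*
 * Illustrative sketch, not part of dev.c: a shared framework layer can
 * install fallback ethtool ops for the devices it creates without
 * clobbering ops a specific driver has already assigned. The names below
 * are hypothetical; ethtool_op_get_link() is the stock helper.
 */
static const struct ethtool_ops example_default_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void example_framework_finish_setup(struct net_device *dev)
{
	/* only takes effect if the driver left dev->ethtool_ops unset */
	netdev_set_default_ethtool_ops(dev, &example_default_ethtool_ops);
}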
7088
74d332c1
ED
7089void netdev_freemem(struct net_device *dev)
7090{
7091 char *addr = (char *)dev - dev->padded;
7092
4cb28970 7093 kvfree(addr);
74d332c1
ED
7094}
7095
1da177e4 7096/**
36909ea4 7097 * alloc_netdev_mqs - allocate network device
c835a677
TG
7098 * @sizeof_priv: size of private data to allocate space for
7099 * @name: device name format string
7100 * @name_assign_type: origin of device name
7101 * @setup: callback to initialize device
7102 * @txqs: the number of TX subqueues to allocate
7103 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
7104 *
7105 * Allocates a struct net_device with private data area for driver use
90e51adf 7106 * and performs basic initialization. Also allocates subqueue structs
36909ea4 7107 * for each queue on the device.
1da177e4 7108 */
36909ea4 7109struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 7110 unsigned char name_assign_type,
36909ea4
TH
7111 void (*setup)(struct net_device *),
7112 unsigned int txqs, unsigned int rxqs)
1da177e4 7113{
1da177e4 7114 struct net_device *dev;
7943986c 7115 size_t alloc_size;
1ce8e7b5 7116 struct net_device *p;
1da177e4 7117
b6fe17d6
SH
7118 BUG_ON(strlen(name) >= sizeof(dev->name));
7119
36909ea4 7120 if (txqs < 1) {
7b6cd1ce 7121 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
7122 return NULL;
7123 }
7124
a953be53 7125#ifdef CONFIG_SYSFS
36909ea4 7126 if (rxqs < 1) {
7b6cd1ce 7127 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
7128 return NULL;
7129 }
7130#endif
7131
fd2ea0a7 7132 alloc_size = sizeof(struct net_device);
d1643d24
AD
7133 if (sizeof_priv) {
7134 /* ensure 32-byte alignment of private area */
1ce8e7b5 7135 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
7136 alloc_size += sizeof_priv;
7137 }
7138 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 7139 alloc_size += NETDEV_ALIGN - 1;
1da177e4 7140
74d332c1
ED
7141 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7142 if (!p)
7143 p = vzalloc(alloc_size);
62b5942a 7144 if (!p)
1da177e4 7145 return NULL;
1da177e4 7146
1ce8e7b5 7147 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 7148 dev->padded = (char *)dev - (char *)p;
ab9c73cc 7149
29b4433d
ED
7150 dev->pcpu_refcnt = alloc_percpu(int);
7151 if (!dev->pcpu_refcnt)
74d332c1 7152 goto free_dev;
ab9c73cc 7153
ab9c73cc 7154 if (dev_addr_init(dev))
29b4433d 7155 goto free_pcpu;
ab9c73cc 7156
22bedad3 7157 dev_mc_init(dev);
a748ee24 7158 dev_uc_init(dev);
ccffad25 7159
c346dca1 7160 dev_net_set(dev, &init_net);
1da177e4 7161
8d3bdbd5 7162 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 7163 dev->gso_max_segs = GSO_MAX_SEGS;
fcbeb976 7164 dev->gso_min_segs = 0;
8d3bdbd5 7165
8d3bdbd5
DM
7166 INIT_LIST_HEAD(&dev->napi_list);
7167 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 7168 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 7169 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
7170 INIT_LIST_HEAD(&dev->adj_list.upper);
7171 INIT_LIST_HEAD(&dev->adj_list.lower);
7172 INIT_LIST_HEAD(&dev->all_adj_list.upper);
7173 INIT_LIST_HEAD(&dev->all_adj_list.lower);
7866a621
SN
7174 INIT_LIST_HEAD(&dev->ptype_all);
7175 INIT_LIST_HEAD(&dev->ptype_specific);
02875878 7176 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
7177 setup(dev);
7178
906470c1 7179 if (!dev->tx_queue_len)
f84bb1ea 7180 dev->priv_flags |= IFF_NO_QUEUE;
906470c1 7181
36909ea4
TH
7182 dev->num_tx_queues = txqs;
7183 dev->real_num_tx_queues = txqs;
ed9af2e8 7184 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 7185 goto free_all;
e8a0464c 7186
a953be53 7187#ifdef CONFIG_SYSFS
36909ea4
TH
7188 dev->num_rx_queues = rxqs;
7189 dev->real_num_rx_queues = rxqs;
fe822240 7190 if (netif_alloc_rx_queues(dev))
8d3bdbd5 7191 goto free_all;
df334545 7192#endif
0a9627f2 7193
1da177e4 7194 strcpy(dev->name, name);
c835a677 7195 dev->name_assign_type = name_assign_type;
cbda10fa 7196 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
7197 if (!dev->ethtool_ops)
7198 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
7199
7200 nf_hook_ingress_init(dev);
7201
1da177e4 7202 return dev;
ab9c73cc 7203
8d3bdbd5
DM
7204free_all:
7205 free_netdev(dev);
7206 return NULL;
7207
29b4433d
ED
7208free_pcpu:
7209 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
7210free_dev:
7211 netdev_freemem(dev);
ab9c73cc 7212 return NULL;
1da177e4 7213}
36909ea4 7214EXPORT_SYMBOL(alloc_netdev_mqs);
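/*
 * Illustrative sketch, not part of dev.c: allocating a multiqueue
 * Ethernet-style device with a private area. "struct example_priv", the
 * "ex%d" name pattern and the queue counts are hypothetical; ether_setup()
 * (from <linux/etherdevice.h>) and NET_NAME_UNKNOWN are the usual choices
 * for an Ethernet device.
 */
struct example_priv {
	spinlock_t lock;
};

static struct net_device *example_alloc_device(void)
{
	struct net_device *dev;
	struct example_priv *priv;

	dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}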
1da177e4
LT
7215
7216/**
7217 * free_netdev - free network device
7218 * @dev: device
7219 *
4ec93edb
YH
7220 * This function does the last stage of destroying an allocated device
7221 * interface. The reference to the device object is released.
1da177e4
LT
7222 * If this is the last reference then it will be freed.
7223 */
7224void free_netdev(struct net_device *dev)
7225{
d565b0a1
HX
7226 struct napi_struct *p, *n;
7227
60877a32 7228 netif_free_tx_queues(dev);
a953be53 7229#ifdef CONFIG_SYSFS
10595902 7230 kvfree(dev->_rx);
fe822240 7231#endif
e8a0464c 7232
33d480ce 7233 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 7234
f001fde5
JP
7235 /* Flush device addresses */
7236 dev_addr_flush(dev);
7237
d565b0a1
HX
7238 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7239 netif_napi_del(p);
7240
29b4433d
ED
7241 free_percpu(dev->pcpu_refcnt);
7242 dev->pcpu_refcnt = NULL;
7243
3041a069 7244 /* Compatibility with error handling in drivers */
1da177e4 7245 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 7246 netdev_freemem(dev);
1da177e4
LT
7247 return;
7248 }
7249
7250 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7251 dev->reg_state = NETREG_RELEASED;
7252
43cb76d9
GKH
7253 /* will free via device release */
7254 put_device(&dev->dev);
1da177e4 7255}
d1b19dff 7256EXPORT_SYMBOL(free_netdev);
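/*
 * Illustrative sketch, not part of dev.c: the common driver probe error
 * path. If registration fails the device is still NETREG_UNINITIALIZED,
 * so free_netdev() simply releases the memory instead of going through the
 * full unregistration todo machinery. The helper is hypothetical.
 */
static int example_register_or_free(struct net_device *dev)
{
	int err;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}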
4ec93edb 7257
f0db275a
SH
7258/**
7259 * synchronize_net - Synchronize with packet receive processing
7260 *
7261 * Wait for packets currently being received to be done.
7262 * Does not block later packets from starting.
7263 */
4ec93edb 7264void synchronize_net(void)
1da177e4
LT
7265{
7266 might_sleep();
be3fc413
ED
7267 if (rtnl_is_locked())
7268 synchronize_rcu_expedited();
7269 else
7270 synchronize_rcu();
1da177e4 7271}
d1b19dff 7272EXPORT_SYMBOL(synchronize_net);
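/*
 * Illustrative sketch, not part of dev.c: a typical synchronize_net() user.
 * After an RCU-protected pointer consulted on the receive path has been
 * cleared, waiting for in-flight receive processing to finish makes it safe
 * to free the old object. "struct example_cfg" and the slot are
 * hypothetical.
 */
struct example_cfg {
	int id;
};

static void example_release_cfg(struct example_cfg __rcu **slot)
{
	struct example_cfg *old;

	old = rcu_dereference_protected(*slot, 1);
	RCU_INIT_POINTER(*slot, NULL);
	synchronize_net();	/* no receiver can still be using "old" */
	kfree(old);
}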
1da177e4
LT
7273
7274/**
44a0873d 7275 * unregister_netdevice_queue - remove device from the kernel
1da177e4 7276 * @dev: device
44a0873d 7277 * @head: list
6ebfbc06 7278 *
1da177e4 7279 * This function shuts down a device interface and removes it
d59b54b1 7280 * from the kernel tables.
44a0873d 7281 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
7282 *
7283 * Callers must hold the rtnl semaphore. You may want
7284 * unregister_netdev() instead of this.
7285 */
7286
44a0873d 7287void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 7288{
a6620712
HX
7289 ASSERT_RTNL();
7290
44a0873d 7291 if (head) {
9fdce099 7292 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
7293 } else {
7294 rollback_registered(dev);
7295 /* Finish processing unregister after unlock */
7296 net_set_todo(dev);
7297 }
1da177e4 7298}
44a0873d 7299EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 7300
9b5e383c
ED
7301/**
7302 * unregister_netdevice_many - unregister many devices
7303 * @head: list of devices
87757a91
ED
7304 *
7305 * Note: As most callers use a stack allocated list_head,
7306 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
7307 */
7308void unregister_netdevice_many(struct list_head *head)
7309{
7310 struct net_device *dev;
7311
7312 if (!list_empty(head)) {
7313 rollback_registered_many(head);
7314 list_for_each_entry(dev, head, unreg_list)
7315 net_set_todo(dev);
87757a91 7316 list_del(head);
9b5e383c
ED
7317 }
7318}
63c8099d 7319EXPORT_SYMBOL(unregister_netdevice_many);
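/*
 * Illustrative sketch, not part of dev.c: batching several unregistrations
 * so the rollback work and RCU grace periods are shared. The stack-allocated
 * list head is exactly the case the note above refers to;
 * unregister_netdevice_many() performs the final list_del() for us. The
 * helper and its array argument are hypothetical.
 */
static void example_unregister_group(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
}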
9b5e383c 7320
1da177e4
LT
7321/**
7322 * unregister_netdev - remove device from the kernel
7323 * @dev: device
7324 *
7325 * This function shuts down a device interface and removes it
d59b54b1 7326 * from the kernel tables.
1da177e4
LT
7327 *
7328 * This is just a wrapper for unregister_netdevice that takes
7329 * the rtnl semaphore. In general you want to use this and not
7330 * unregister_netdevice.
7331 */
7332void unregister_netdev(struct net_device *dev)
7333{
7334 rtnl_lock();
7335 unregister_netdevice(dev);
7336 rtnl_unlock();
7337}
1da177e4
LT
7338EXPORT_SYMBOL(unregister_netdev);
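/*
 * Illustrative sketch, not part of dev.c: the usual single-device module
 * exit path for a driver that does not already hold RTNL. "example_dev" is
 * a hypothetical module-global set up at init time.
 */
static struct net_device *example_dev;

static void __exit example_module_exit(void)
{
	unregister_netdev(example_dev);	/* takes and drops RTNL itself */
	free_netdev(example_dev);
}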
7339
ce286d32
EB
7340/**
7341 * dev_change_net_namespace - move device to a different network namespace
7342 * @dev: device
7343 * @net: network namespace
7344 * @pat: If not NULL name pattern to try if the current device name
7345 * is already taken in the destination network namespace.
7346 *
7347 * This function shuts down a device interface and moves it
7348 * to a new network namespace. On success 0 is returned, on
7349 * a failure a negative errno code is returned.
7350 *
7351 * Callers must hold the rtnl semaphore.
7352 */
7353
7354int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7355{
ce286d32
EB
7356 int err;
7357
7358 ASSERT_RTNL();
7359
7360 /* Don't allow namespace local devices to be moved. */
7361 err = -EINVAL;
7362 if (dev->features & NETIF_F_NETNS_LOCAL)
7363 goto out;
7364
7365 /* Ensure the device has been registered */
ce286d32
EB
7366 if (dev->reg_state != NETREG_REGISTERED)
7367 goto out;
7368
7369 /* Get out if there is nothing to do */
7370 err = 0;
878628fb 7371 if (net_eq(dev_net(dev), net))
ce286d32
EB
7372 goto out;
7373
7374 /* Pick the destination device name, and ensure
7375 * we can use it in the destination network namespace.
7376 */
7377 err = -EEXIST;
d9031024 7378 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
7379 /* We get here if we can't use the current device name */
7380 if (!pat)
7381 goto out;
828de4f6 7382 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
7383 goto out;
7384 }
7385
7386 /*
7387 * And now a mini version of register_netdevice and unregister_netdevice.
7388 */
7389
7390 /* If device is running close it first. */
9b772652 7391 dev_close(dev);
ce286d32
EB
7392
7393 /* And unlink it from device chain */
7394 err = -ENODEV;
7395 unlist_netdevice(dev);
7396
7397 synchronize_net();
7398
7399 /* Shutdown queueing discipline. */
7400 dev_shutdown(dev);
7401
7402 /* Notify protocols that we are about to destroy
7403 this device. They should clean up all of their state.
3b27e105
DL
7404
7405 Note that dev->reg_state stays at NETREG_REGISTERED.
7406 This is wanted because this way 8021q and macvlan know
7407 the device is just moving and can keep their slaves up.
ce286d32
EB
7408 */
7409 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
7410 rcu_barrier();
7411 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 7412 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
7413
7414 /*
7415 * Flush the unicast and multicast chains
7416 */
a748ee24 7417 dev_uc_flush(dev);
22bedad3 7418 dev_mc_flush(dev);
ce286d32 7419
4e66ae2e
SH
7420 /* Send a netdev-removed uevent to the old namespace */
7421 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 7422 netdev_adjacent_del_links(dev);
4e66ae2e 7423
ce286d32 7424 /* Actually switch the network namespace */
c346dca1 7425 dev_net_set(dev, net);
ce286d32 7426
ce286d32 7427 /* If there is an ifindex conflict assign a new one */
7a66bbc9 7428 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 7429 dev->ifindex = dev_new_index(net);
ce286d32 7430
4e66ae2e
SH
7431 /* Send a netdev-add uevent to the new namespace */
7432 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 7433 netdev_adjacent_add_links(dev);
4e66ae2e 7434
8b41d188 7435 /* Fixup kobjects */
a1b3f594 7436 err = device_rename(&dev->dev, dev->name);
8b41d188 7437 WARN_ON(err);
ce286d32
EB
7438
7439 /* Add the device back in the hashes */
7440 list_netdevice(dev);
7441
7442 /* Notify protocols that a new device appeared. */
7443 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7444
d90a909e
EB
7445 /*
7446 * Prevent userspace races by waiting until the network
7447 * device is fully setup before sending notifications.
7448 */
7f294054 7449 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 7450
ce286d32
EB
7451 synchronize_net();
7452 err = 0;
7453out:
7454 return err;
7455}
463d0183 7456EXPORT_SYMBOL_GPL(dev_change_net_namespace);
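/*
 * Illustrative sketch, not part of dev.c: moving a device into an
 * already-obtained target namespace. RTNL is taken around the call, and the
 * "eth%d" pattern is only consulted if the current name clashes in the
 * destination namespace. "target_net" is assumed to be held by the caller.
 */
static int example_move_dev(struct net_device *dev, struct net *target_net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target_net, "eth%d");
	rtnl_unlock();
	return err;
}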
ce286d32 7457
1da177e4
LT
7458static int dev_cpu_callback(struct notifier_block *nfb,
7459 unsigned long action,
7460 void *ocpu)
7461{
7462 struct sk_buff **list_skb;
1da177e4
LT
7463 struct sk_buff *skb;
7464 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7465 struct softnet_data *sd, *oldsd;
7466
8bb78442 7467 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
7468 return NOTIFY_OK;
7469
7470 local_irq_disable();
7471 cpu = smp_processor_id();
7472 sd = &per_cpu(softnet_data, cpu);
7473 oldsd = &per_cpu(softnet_data, oldcpu);
7474
7475 /* Find end of our completion_queue. */
7476 list_skb = &sd->completion_queue;
7477 while (*list_skb)
7478 list_skb = &(*list_skb)->next;
7479 /* Append completion queue from offline CPU. */
7480 *list_skb = oldsd->completion_queue;
7481 oldsd->completion_queue = NULL;
7482
1da177e4 7483 /* Append output queue from offline CPU. */
a9cbd588
CG
7484 if (oldsd->output_queue) {
7485 *sd->output_queue_tailp = oldsd->output_queue;
7486 sd->output_queue_tailp = oldsd->output_queue_tailp;
7487 oldsd->output_queue = NULL;
7488 oldsd->output_queue_tailp = &oldsd->output_queue;
7489 }
ac64da0b
ED
7490 /* Append NAPI poll list from offline CPU, with one exception :
7491 * process_backlog() must be called by cpu owning percpu backlog.
7492 * We properly handle process_queue & input_pkt_queue later.
7493 */
7494 while (!list_empty(&oldsd->poll_list)) {
7495 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7496 struct napi_struct,
7497 poll_list);
7498
7499 list_del_init(&napi->poll_list);
7500 if (napi->poll == process_backlog)
7501 napi->state = 0;
7502 else
7503 ____napi_schedule(sd, napi);
264524d5 7504 }
1da177e4
LT
7505
7506 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7507 local_irq_enable();
7508
7509 /* Process offline CPU's input_pkt_queue */
76cc8b13 7510 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 7511 netif_rx_ni(skb);
76cc8b13 7512 input_queue_head_incr(oldsd);
fec5e652 7513 }
ac64da0b 7514 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 7515 netif_rx_ni(skb);
76cc8b13
TH
7516 input_queue_head_incr(oldsd);
7517 }
1da177e4
LT
7518
7519 return NOTIFY_OK;
7520}
1da177e4
LT
7521
7522
7f353bf2 7523/**
b63365a2
HX
7524 * netdev_increment_features - increment feature set by one
7525 * @all: current feature set
7526 * @one: new feature set
7527 * @mask: mask feature set
7f353bf2
HX
7528 *
7529 * Computes a new feature set after adding a device with feature set
b63365a2
HX
7530 * @one to the master device with current feature set @all. Will not
7531 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 7532 */
c8f44aff
MM
7533netdev_features_t netdev_increment_features(netdev_features_t all,
7534 netdev_features_t one, netdev_features_t mask)
b63365a2 7535{
1742f183
MM
7536 if (mask & NETIF_F_GEN_CSUM)
7537 mask |= NETIF_F_ALL_CSUM;
7538 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 7539
1742f183
MM
7540 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7541 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 7542
1742f183
MM
7543 /* If one device supports hw checksumming, set for all. */
7544 if (all & NETIF_F_GEN_CSUM)
7545 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7f353bf2
HX
7546
7547 return all;
7548}
b63365a2 7549EXPORT_SYMBOL(netdev_increment_features);
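/*
 * Illustrative sketch, not part of dev.c: an aggregating master
 * (bonding/team style) recomputing its feature set from its slaves one at a
 * time. "struct example_slave", the slave list and EXAMPLE_MASTER_FEATURES
 * are hypothetical; the accumulate-with-mask pattern is the intended use.
 */
#define EXAMPLE_MASTER_FEATURES (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_CSUM)

struct example_slave {
	struct list_head list;
	struct net_device *dev;
};

static netdev_features_t example_compute_features(struct list_head *slaves)
{
	netdev_features_t features = EXAMPLE_MASTER_FEATURES;
	struct example_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     EXAMPLE_MASTER_FEATURES);
	return features;
}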
7f353bf2 7550
430f03cd 7551static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
7552{
7553 int i;
7554 struct hlist_head *hash;
7555
7556 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7557 if (hash != NULL)
7558 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7559 INIT_HLIST_HEAD(&hash[i]);
7560
7561 return hash;
7562}
7563
881d966b 7564/* Initialize per network namespace state */
4665079c 7565static int __net_init netdev_init(struct net *net)
881d966b 7566{
734b6541
RM
7567 if (net != &init_net)
7568 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 7569
30d97d35
PE
7570 net->dev_name_head = netdev_create_hash();
7571 if (net->dev_name_head == NULL)
7572 goto err_name;
881d966b 7573
30d97d35
PE
7574 net->dev_index_head = netdev_create_hash();
7575 if (net->dev_index_head == NULL)
7576 goto err_idx;
881d966b
EB
7577
7578 return 0;
30d97d35
PE
7579
7580err_idx:
7581 kfree(net->dev_name_head);
7582err_name:
7583 return -ENOMEM;
881d966b
EB
7584}
7585
f0db275a
SH
7586/**
7587 * netdev_drivername - network driver for the device
7588 * @dev: network device
f0db275a
SH
7589 *
7590 * Determine network driver for device.
7591 */
3019de12 7592const char *netdev_drivername(const struct net_device *dev)
6579e57b 7593{
cf04a4c7
SH
7594 const struct device_driver *driver;
7595 const struct device *parent;
3019de12 7596 const char *empty = "";
6579e57b
AV
7597
7598 parent = dev->dev.parent;
6579e57b 7599 if (!parent)
3019de12 7600 return empty;
6579e57b
AV
7601
7602 driver = parent->driver;
7603 if (driver && driver->name)
3019de12
DM
7604 return driver->name;
7605 return empty;
6579e57b
AV
7606}
7607
6ea754eb
JP
7608static void __netdev_printk(const char *level, const struct net_device *dev,
7609 struct va_format *vaf)
256df2f3 7610{
b004ff49 7611 if (dev && dev->dev.parent) {
6ea754eb
JP
7612 dev_printk_emit(level[1] - '0',
7613 dev->dev.parent,
7614 "%s %s %s%s: %pV",
7615 dev_driver_string(dev->dev.parent),
7616 dev_name(dev->dev.parent),
7617 netdev_name(dev), netdev_reg_state(dev),
7618 vaf);
b004ff49 7619 } else if (dev) {
6ea754eb
JP
7620 printk("%s%s%s: %pV",
7621 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 7622 } else {
6ea754eb 7623 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 7624 }
256df2f3
JP
7625}
7626
6ea754eb
JP
7627void netdev_printk(const char *level, const struct net_device *dev,
7628 const char *format, ...)
256df2f3
JP
7629{
7630 struct va_format vaf;
7631 va_list args;
256df2f3
JP
7632
7633 va_start(args, format);
7634
7635 vaf.fmt = format;
7636 vaf.va = &args;
7637
6ea754eb 7638 __netdev_printk(level, dev, &vaf);
b004ff49 7639
256df2f3 7640 va_end(args);
256df2f3
JP
7641}
7642EXPORT_SYMBOL(netdev_printk);
7643
7644#define define_netdev_printk_level(func, level) \
6ea754eb 7645void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 7646{ \
256df2f3
JP
7647 struct va_format vaf; \
7648 va_list args; \
7649 \
7650 va_start(args, fmt); \
7651 \
7652 vaf.fmt = fmt; \
7653 vaf.va = &args; \
7654 \
6ea754eb 7655 __netdev_printk(level, dev, &vaf); \
b004ff49 7656 \
256df2f3 7657 va_end(args); \
256df2f3
JP
7658} \
7659EXPORT_SYMBOL(func);
7660
7661define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7662define_netdev_printk_level(netdev_alert, KERN_ALERT);
7663define_netdev_printk_level(netdev_crit, KERN_CRIT);
7664define_netdev_printk_level(netdev_err, KERN_ERR);
7665define_netdev_printk_level(netdev_warn, KERN_WARNING);
7666define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7667define_netdev_printk_level(netdev_info, KERN_INFO);
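/*
 * Illustrative sketch, not part of dev.c: drivers use the per-level helpers
 * defined above instead of raw printk() so that the bus, driver and
 * interface name are prefixed consistently. The link reporting function is
 * hypothetical.
 */
static void example_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}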
7668
4665079c 7669static void __net_exit netdev_exit(struct net *net)
881d966b
EB
7670{
7671 kfree(net->dev_name_head);
7672 kfree(net->dev_index_head);
7673}
7674
022cbae6 7675static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
7676 .init = netdev_init,
7677 .exit = netdev_exit,
7678};
7679
4665079c 7680static void __net_exit default_device_exit(struct net *net)
ce286d32 7681{
e008b5fc 7682 struct net_device *dev, *aux;
ce286d32 7683 /*
e008b5fc 7684 * Push all migratable network devices back to the
ce286d32
EB
7685 * initial network namespace
7686 */
7687 rtnl_lock();
e008b5fc 7688 for_each_netdev_safe(net, dev, aux) {
ce286d32 7689 int err;
aca51397 7690 char fb_name[IFNAMSIZ];
ce286d32
EB
7691
7692 /* Ignore unmoveable devices (i.e. loopback) */
7693 if (dev->features & NETIF_F_NETNS_LOCAL)
7694 continue;
7695
e008b5fc
EB
7696 /* Leave virtual devices for the generic cleanup */
7697 if (dev->rtnl_link_ops)
7698 continue;
d0c082ce 7699
25985edc 7700 /* Push remaining network devices to init_net */
aca51397
PE
7701 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7702 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 7703 if (err) {
7b6cd1ce
JP
7704 pr_emerg("%s: failed to move %s to init_net: %d\n",
7705 __func__, dev->name, err);
aca51397 7706 BUG();
ce286d32
EB
7707 }
7708 }
7709 rtnl_unlock();
7710}
7711
50624c93
EB
7712static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7713{
7714 /* Return with the rtnl_lock held when there are no network
7715 * devices unregistering in any network namespace in net_list.
7716 */
7717 struct net *net;
7718 bool unregistering;
ff960a73 7719 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 7720
ff960a73 7721 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 7722 for (;;) {
50624c93
EB
7723 unregistering = false;
7724 rtnl_lock();
7725 list_for_each_entry(net, net_list, exit_list) {
7726 if (net->dev_unreg_count > 0) {
7727 unregistering = true;
7728 break;
7729 }
7730 }
7731 if (!unregistering)
7732 break;
7733 __rtnl_unlock();
ff960a73
PZ
7734
7735 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 7736 }
ff960a73 7737 remove_wait_queue(&netdev_unregistering_wq, &wait);
50624c93
EB
7738}
7739
04dc7f6b
EB
7740static void __net_exit default_device_exit_batch(struct list_head *net_list)
7741{
7742 /* At exit, all network devices must be removed from a network
b595076a 7743 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
7744 * Do this across as many network namespaces as possible to
7745 * improve batching efficiency.
7746 */
7747 struct net_device *dev;
7748 struct net *net;
7749 LIST_HEAD(dev_kill_list);
7750
50624c93
EB
7751 /* To prevent network device cleanup code from dereferencing
7752 * loopback devices or network devices that have been freed,
7753 * wait here for all pending unregistrations to complete,
7754 * before unregistering the loopback device and allowing the
7755 * network namespace to be freed.
7756 *
7757 * The netdev todo list containing all network device
7758 * unregistrations that happen in default_device_exit_batch
7759 * will run in the rtnl_unlock() at the end of
7760 * default_device_exit_batch.
7761 */
7762 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
7763 list_for_each_entry(net, net_list, exit_list) {
7764 for_each_netdev_reverse(net, dev) {
b0ab2fab 7765 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
04dc7f6b
EB
7766 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7767 else
7768 unregister_netdevice_queue(dev, &dev_kill_list);
7769 }
7770 }
7771 unregister_netdevice_many(&dev_kill_list);
7772 rtnl_unlock();
7773}
7774
022cbae6 7775static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 7776 .exit = default_device_exit,
04dc7f6b 7777 .exit_batch = default_device_exit_batch,
ce286d32
EB
7778};
7779
1da177e4
LT
7780/*
7781 * Initialize the DEV module. At boot time this walks the device list and
7782 * unhooks any devices that fail to initialise (normally hardware not
7783 * present) and leaves us with a valid list of present and active devices.
7784 *
7785 */
7786
7787/*
7788 * This is called single threaded during boot, so no need
7789 * to take the rtnl semaphore.
7790 */
7791static int __init net_dev_init(void)
7792{
7793 int i, rc = -ENOMEM;
7794
7795 BUG_ON(!dev_boot_phase);
7796
1da177e4
LT
7797 if (dev_proc_init())
7798 goto out;
7799
8b41d188 7800 if (netdev_kobject_init())
1da177e4
LT
7801 goto out;
7802
7803 INIT_LIST_HEAD(&ptype_all);
82d8a867 7804 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
7805 INIT_LIST_HEAD(&ptype_base[i]);
7806
62532da9
VY
7807 INIT_LIST_HEAD(&offload_base);
7808
881d966b
EB
7809 if (register_pernet_subsys(&netdev_net_ops))
7810 goto out;
1da177e4
LT
7811
7812 /*
7813 * Initialise the packet receive queues.
7814 */
7815
6f912042 7816 for_each_possible_cpu(i) {
e36fa2f7 7817 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 7818
e36fa2f7 7819 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 7820 skb_queue_head_init(&sd->process_queue);
e36fa2f7 7821 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 7822 sd->output_queue_tailp = &sd->output_queue;
df334545 7823#ifdef CONFIG_RPS
e36fa2f7
ED
7824 sd->csd.func = rps_trigger_softirq;
7825 sd->csd.info = sd;
e36fa2f7 7826 sd->cpu = i;
1e94d72f 7827#endif
0a9627f2 7828
e36fa2f7
ED
7829 sd->backlog.poll = process_backlog;
7830 sd->backlog.weight = weight_p;
1da177e4
LT
7831 }
7832
1da177e4
LT
7833 dev_boot_phase = 0;
7834
505d4f73
EB
7835 /* The loopback device is special: if any other network device
7836 * is present in a network namespace, the loopback device must
7837 * be present as well. Since we now dynamically allocate and free the
7838 * loopback device, ensure this invariant is maintained by
7839 * keeping the loopback device as the first device on the
7840 * list of network devices, so that it is the first device
7841 * that appears and the last network device
7842 * that disappears.
7843 */
7844 if (register_pernet_device(&loopback_net_ops))
7845 goto out;
7846
7847 if (register_pernet_device(&default_device_ops))
7848 goto out;
7849
962cf36c
CM
7850 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7851 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
7852
7853 hotcpu_notifier(dev_cpu_callback, 0);
f38a9eb1 7854 dst_subsys_init();
1da177e4
LT
7855 rc = 0;
7856out:
7857 return rc;
7858}
7859
7860subsys_initcall(net_dev_init);