/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer:	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix: Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

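/*
 * Usage sketch (illustrative, not part of this file): a pure reader
 * walking the device list under dev_base_lock, as described above; an
 * RCU reader would use for_each_netdev_rcu() under rcu_read_lock()
 * instead.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */
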
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers that mangle input packets
 * MUST BE last in hash buckets, and checking protocol handlers
 * MUST start from the promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if a protocol handler that mangles packets
 * were first on the list, it could not sense that the packet
 * is cloned and should be copied-on-write, so it would change it
 * and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

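/*
 * Usage sketch (illustrative, not part of this file): registering a
 * handler for one ethertype. MY_ETH_P_EXAMPLE and my_rcv() are made-up
 * names; the callback signature matches struct packet_type.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(MY_ETH_P_EXAMPLE),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);	(may sleep, see above)
 */
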
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

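/*
 * Usage sketch (illustrative, not part of this file): a protocol's
 * GRO/GSO callbacks are registered through a packet_offload, kept
 * sorted by .priority in dev_add_offload() above. The my_* callbacks
 * are placeholders for functions the caller would implement.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 10,
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */
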
/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

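/*
 * Usage sketch: the parser above takes up to four integers followed by
 * a device name on the kernel command line; the sample values below
 * are arbitrary.
 *
 *	netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>
 *	netdev=5,0x340,0,0,eth0
 *
 * The saved ifmap is matched by name later, in netdev_boot_setup_check().
 */
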
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

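/*
 * Usage sketch: the refcounted lookup must be paired with dev_put();
 * the _rcu variant is only valid inside the read-side section.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
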
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

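/*
 * Usage sketch for the RCU variant above: no reference is taken, so
 * the pointer must not be used after rcu_read_unlock().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		netdev_info(dev, "found ifindex %d\n", ifindex);
 *	rcu_read_unlock();
 */
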
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

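/*
 * Usage sketch: looking up an Ethernet device by MAC address. The
 * address buffer must be dev->addr_len bytes (ETH_ALEN for Ethernet);
 * the sample address is arbitrary.
 *
 *	static const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		netdev_info(dev, "has that MAC\n");
 *	rcu_read_unlock();
 */
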
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

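/*
 * Examples of the rules above: "eth0" and "veth-a" are accepted;
 * "" (empty), ".", "..", "a/b", "a:b" and names containing whitespace
 * are rejected, as is any name of IFNAMSIZ bytes or more.
 */
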
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

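/*
 * Usage sketch: with eth0 and eth1 already registered, the first free
 * slot is picked and dev->name becomes "eth2".
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;	(-EINVAL or -ENFILE, see above)
 */
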
int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

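/*
 * Usage sketch: setting and reading back an alias. dev_set_alias()
 * returns the stored length on success.
 *
 *	char buf[IFALIASZ];
 *
 *	dev_set_alias(dev, "uplink to core switch", 21);
 *	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
 *		netdev_info(dev, "alias: %s\n", buf);
 */
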
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

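/*
 * Usage sketch: a hypervisor or failover driver would call this once
 * after e.g. a live migration completes, so peers and switches relearn
 * where the MAC now lives:
 *
 *	netdev_notify_peers(dev);
 *
 * Note it takes the RTNL itself, so it must not be called with the
 * RTNL already held.
 */
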
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can even be on a different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);

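/*
 * Usage sketch: both dev_open() and dev_close() assume the RTNL is
 * held, as the notifier calls and flag updates above rely on it.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
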
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

351638e7
JP
1595static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1596 struct net_device *dev)
1597{
51d0c047
DA
1598 struct netdev_notifier_info info = {
1599 .dev = dev,
1600 };
351638e7 1601
351638e7
JP
1602 return nb->notifier_call(nb, val, &info);
1603}
0187bdfb 1604
881d966b
EB
1605static int dev_boot_phase = 1;
1606
1da177e4 1607/**
722c9a0c 1608 * register_netdevice_notifier - register a network notifier block
1609 * @nb: notifier
1da177e4 1610 *
722c9a0c 1611 * Register a notifier to be called when network device events occur.
1612 * The notifier passed is linked into the kernel structures and must
1613 * not be reused until it has been unregistered. A negative errno code
1614 * is returned on a failure.
1da177e4 1615 *
722c9a0c 1616 * When registered all registration and up events are replayed
1617 * to the new notifier to allow device to have a race free
1618 * view of the network device list.
1da177e4
LT
1619 */
1620
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

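/* Example (illustrative sketch, not part of dev.c): a module reacting to
 * interfaces coming up might register a notifier like this; the names
 * "my_netdev_event" and "my_nb" are hypothetical:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 * register_netdevice_notifier(&my_nb) then goes in module init and
 * unregister_netdevice_notifier(&my_nb) in module exit. Because
 * registration replays NETDEV_REGISTER/NETDEV_UP for existing devices,
 * the handler must cope with devices that were up before it loaded.
 */
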
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier, removing
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

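/* Example (illustrative): this is the broadcast path for simple events
 * that carry no payload beyond the device itself; e.g. the bonding driver
 * announces a failover with something like:
 *
 *	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond_dev);
 *
 * Events needing extra data populate a larger netdev_notifier_*_info
 * struct and go through call_netdevice_notifiers_info() instead.
 */
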
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

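/* Example (illustrative sketch): a veth-style pair driver could hand
 * frames to its peer from ndo_start_xmit; "my_get_peer" is hypothetical:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 *
 * Note the skb is consumed on both success and drop, so the caller must
 * not touch it after the call.
 */
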
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify the tc mapping remains valid and,
 * if not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);

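/* Worked example: with two traffic classes laid out as
 * tc_to_txq[0] = { .offset = 0, .count = 4 } and
 * tc_to_txq[1] = { .offset = 4, .count = 4 },
 * netdev_txq_to_tc(dev, 5) returns 1: (5 - 0) < 4 fails for the first
 * class, and (5 - 4) < 4 holds for the second.
 */
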
#ifdef CONFIG_XPS
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

2118
6234f874
AD
2119static bool remove_xps_queue_cpu(struct net_device *dev,
2120 struct xps_dev_maps *dev_maps,
2121 int cpu, u16 offset, u16 count)
2122{
184c449f
AD
2123 int num_tc = dev->num_tc ? : 1;
2124 bool active = false;
2125 int tci;
6234f874 2126
184c449f
AD
2127 for (tci = cpu * num_tc; num_tc--; tci++) {
2128 int i, j;
2129
2130 for (i = count, j = offset; i--; j++) {
6358d49a 2131 if (!remove_xps_queue(dev_maps, tci, j))
184c449f
AD
2132 break;
2133 }
2134
2135 active |= i < 0;
6234f874
AD
2136 }
2137
184c449f 2138 return active;
6234f874
AD
2139}
2140
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
			   u16 offset, u16 count, bool is_rxqs_map)
{
	bool active = false;
	int i, j;

	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
	     j < nr_ids;)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
					       count);
	if (!active) {
		if (is_rxqs_map) {
			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
		} else {
			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);

			for (i = offset + (count - 1); count--; i--)
				netdev_queue_numa_node_write(
					netdev_get_tx_queue(dev, i),
					NUMA_NO_NODE);
		}
		kfree_rcu(dev_maps, rcu);
	}
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	const unsigned long *possible_mask = NULL;
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

	if (!static_key_false(&xps_needed))
		return;

	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed)) {
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		if (dev_maps) {
			nr_ids = dev->num_rx_queues;
			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
				       offset, count, true);
		}
	}

	dev_maps = xmap_dereference(dev->xps_cpus_map);
	if (!dev_maps)
		goto out_no_maps;

	if (num_possible_cpus() > 1)
		possible_mask = cpumask_bits(cpu_possible_mask);
	nr_ids = nr_cpu_ids;
	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
		       false);

out_no_maps:
	if (static_key_enabled(&xps_rxqs_needed))
		static_key_slow_dec(&xps_rxqs_needed);

	static_key_slow_dec(&xps_needed);
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 * map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map)
{
	const unsigned long *online_mask = NULL, *possible_mask = NULL;
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;
	unsigned int nr_ids;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);
	if (is_rxqs_map) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1) {
			online_mask = cpumask_bits(cpu_online_mask);
			possible_mask = cpumask_bits(cpu_possible_mask);
		}
		dev_maps = xmap_dereference(dev->xps_cpus_map);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = j * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
				 NULL;

		map = expand_xps_map(map, j, index, is_rxqs_map);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	static_key_slow_inc(&xps_needed);
	if (is_rxqs_map)
		static_key_slow_inc(&xps_rxqs_needed);

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* We need to explicitly update tci as previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = j * num_tc + tc;

		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (!is_rxqs_map) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}
	}

	if (is_rxqs_map)
		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
	else
		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (!is_rxqs_map) {
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);
	}

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = tc, tci = j * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!netif_attr_test_mask(j, mask, nr_ids) ||
		    !netif_attr_test_online(j, online_mask, nr_ids))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		if (is_rxqs_map)
			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
		else
			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif

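/* Example (illustrative): XPS is normally configured from user space via
 * sysfs, which lands in __netif_set_xps_queue(); e.g. steering tx-0 of a
 * hypothetical eth0 to CPUs 0-3:
 *
 *	echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * A driver can also call netif_set_xps_queue() directly with a cpumask
 * to install a default affinity for each of its tx queues.
 */
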
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);

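/* Example (illustrative sketch): a driver offloading mqprio might carve
 * eight tx queues into two classes; the layout below is hypothetical:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0 = queues 0..3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1 = queues 4..7)
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 1, 1);
 */
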
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_SYSFS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

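/* Example (illustrative sketch): a driver handling an "ethtool -L" style
 * channel change might resize both directions under rtnl_lock:
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txqs);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, new_rxqs);
 *
 * where "new_txqs"/"new_rxqs" must not exceed the num_tx_queues /
 * num_rx_queues the device was allocated with.
 */
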
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);

2664
bea3348e
SH
2665/**
2666 * netif_device_detach - mark device as removed
2667 * @dev: network device
2668 *
2669 * Mark device as removed from system and therefore no longer available.
2670 */
56079431
DV
2671void netif_device_detach(struct net_device *dev)
2672{
2673 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2674 netif_running(dev)) {
d543103a 2675 netif_tx_stop_all_queues(dev);
56079431
DV
2676 }
2677}
2678EXPORT_SYMBOL(netif_device_detach);
2679
bea3348e
SH
2680/**
2681 * netif_device_attach - mark device as attached
2682 * @dev: network device
2683 *
2684 * Mark device as attached from system and restart if needed.
2685 */
56079431
DV
2686void netif_device_attach(struct net_device *dev)
2687{
2688 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2689 netif_running(dev)) {
d543103a 2690 netif_tx_wake_all_queues(dev);
4ec93edb 2691 __netdev_watchdog_up(dev);
56079431
DV
2692 }
2693}
2694EXPORT_SYMBOL(netif_device_attach);
2695
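/* Example (illustrative sketch): drivers typically pair these around
 * power transitions; "my_suspend"/"my_resume"/"my_netdev" are hypothetical:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		netif_device_detach(my_netdev(d));
 *		... quiesce DMA, save hardware state ...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		... restore hardware state, restart DMA ...
 *		netif_device_attach(my_netdev(d));
 *		return 0;
 *	}
 */
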
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}

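/* Worked example: reciprocal_scale(hash, qcount) computes
 * (u32)(((u64)hash * qcount) >> 32), mapping the full 32-bit hash space
 * uniformly onto [0, qcount) without a division. With qcount = 8 and
 * skb_get_hash() returning 0x80000000, (0x80000000ULL * 8) >> 32 = 4,
 * so the skb is steered to queue qoffset + 4.
 */
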
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

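/* Example (illustrative sketch): a driver whose hardware cannot offload
 * encapsulated frames could mask features in ndo_features_check;
 * "my_features_check" is hypothetical:
 *
 *	static netdev_features_t
 *	my_features_check(struct sk_buff *skb, struct net_device *dev,
 *			  netdev_features_t features)
 *	{
 *		if (skb->encapsulation)
 *			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *		return features;
 *	}
 *
 * Clearing bits here makes the core fall back to software checksum/GSO
 * in validate_xmit_skb().
 */
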
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

7b9c6090 3121
8dcda22a
DM
3122struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3123 struct netdev_queue *txq, int *ret)
7f2e870f
DM
3124{
3125 struct sk_buff *skb = first;
3126 int rc = NETDEV_TX_OK;
7b9c6090 3127
7f2e870f
DM
3128 while (skb) {
3129 struct sk_buff *next = skb->next;
fc70fb64 3130
7f2e870f 3131 skb->next = NULL;
95f6b3dd 3132 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
3133 if (unlikely(!dev_xmit_complete(rc))) {
3134 skb->next = next;
3135 goto out;
3136 }
6afff0ca 3137
7f2e870f
DM
3138 skb = next;
3139 if (netif_xmit_stopped(txq) && skb) {
3140 rc = NETDEV_TX_BUSY;
3141 break;
9ccb8975 3142 }
7f2e870f 3143 }
9ccb8975 3144
7f2e870f
DM
3145out:
3146 *ret = rc;
3147 return skb;
3148}
b40863c6 3149
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

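/* Worked example: a TSO skb with a 66 byte header (14 ethernet + 20 IPv4
 * + 32 TCP including timestamps), gso_size = 1448 and gso_segs = 5 has
 * skb->len = 66 + 5 * 1448 = 7306. The code above adds (5 - 1) * 66 =
 * 264, so pkt_len = 7570, matching the 5 * (66 + 1448) = 7570 bytes that
 * actually hit the wire.
 */
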
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			__qdisc_drop(skb, &to_free);
			rc = NET_XMIT_DROP;
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

3371
86f8515f 3372#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
3373static void skb_update_prio(struct sk_buff *skb)
3374{
4dcb31d4
ED
3375 const struct netprio_map *map;
3376 const struct sock *sk;
3377 unsigned int prioidx;
5bc1421e 3378
4dcb31d4
ED
3379 if (skb->priority)
3380 return;
3381 map = rcu_dereference_bh(skb->dev->priomap);
3382 if (!map)
3383 return;
3384 sk = skb_to_full_sk(skb);
3385 if (!sk)
3386 return;
91c68ce2 3387
4dcb31d4
ED
3388 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3389
3390 if (prioidx < map->priomap_len)
3391 skb->priority = map->priomap[prioidx];
5bc1421e
NH
3392}
3393#else
3394#define skb_update_prio(skb)
3395#endif
3396
f60e5990 3397DEFINE_PER_CPU(int, xmit_recursion);
3398EXPORT_SYMBOL(xmit_recursion);
3399
95603e22
MM
3400/**
3401 * dev_loopback_xmit - loop back @skb
0c4b51f0
EB
3402 * @net: network namespace this loopback is happening in
3403 * @sk: sk needed to be a netfilter okfn
95603e22
MM
3404 * @skb: buffer to transmit
3405 */
0c4b51f0 3406int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
3407{
3408 skb_reset_mac_header(skb);
3409 __skb_pull(skb, skb_network_offset(skb));
3410 skb->pkt_type = PACKET_LOOPBACK;
3411 skb->ip_summed = CHECKSUM_UNNECESSARY;
3412 WARN_ON(!skb_dst(skb));
3413 skb_dst_force(skb);
3414 netif_rx_ni(skb);
3415 return 0;
3416}
3417EXPORT_SYMBOL(dev_loopback_xmit);
3418
#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result cl_res;

	if (!miniq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */

3460
fc9bab24
AN
3461#ifdef CONFIG_XPS
3462static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3463 struct xps_dev_maps *dev_maps, unsigned int tci)
3464{
3465 struct xps_map *map;
3466 int queue_index = -1;
3467
3468 if (dev->num_tc) {
3469 tci *= dev->num_tc;
3470 tci += netdev_get_prio_tc_map(dev, skb->priority);
3471 }
3472
3473 map = rcu_dereference(dev_maps->attr_map[tci]);
3474 if (map) {
3475 if (map->len == 1)
3476 queue_index = map->queues[0];
3477 else
3478 queue_index = map->queues[reciprocal_scale(
3479 skb_get_hash(skb), map->len)];
3480 if (unlikely(queue_index >= dev->real_num_tx_queues))
3481 queue_index = -1;
3482 }
3483 return queue_index;
3484}
3485#endif
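/*
 * Aside (not part of this file): reciprocal_scale(), used above to pick a
 * queue from the XPS map, maps a 32-bit hash uniformly onto [0, len) without
 * a division. A minimal stand-alone sketch of the same computation:
 */
static inline u32 example_reciprocal_scale(u32 val, u32 ep_ro)
{
	/* (val / 2^32) * ep_ro, computed with one 64-bit multiply and a shift */
	return (u32)(((u64)val * ep_ro) >> 32);
}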
3486
3487static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
638b2a69
JP
3488{
3489#ifdef CONFIG_XPS
3490 struct xps_dev_maps *dev_maps;
fc9bab24 3491 struct sock *sk = skb->sk;
638b2a69
JP
3492 int queue_index = -1;
3493
04157469
AN
3494 if (!static_key_false(&xps_needed))
3495 return -1;
3496
638b2a69 3497 rcu_read_lock();
fc9bab24
AN
3498 if (!static_key_false(&xps_rxqs_needed))
3499 goto get_cpus_map;
3500
3501 dev_maps = rcu_dereference(dev->xps_rxqs_map);
638b2a69 3502 if (dev_maps) {
fc9bab24 3503 int tci = sk_rx_queue_get(sk);
184c449f 3504
fc9bab24
AN
3505 if (tci >= 0 && tci < dev->num_rx_queues)
3506 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3507 tci);
3508 }
184c449f 3509
fc9bab24
AN
3510get_cpus_map:
3511 if (queue_index < 0) {
3512 dev_maps = rcu_dereference(dev->xps_cpus_map);
3513 if (dev_maps) {
3514 unsigned int tci = skb->sender_cpu - 1;
3515
3516 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3517 tci);
638b2a69
JP
3518 }
3519 }
3520 rcu_read_unlock();
3521
3522 return queue_index;
3523#else
3524 return -1;
3525#endif
3526}
3527
3528static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3529{
3530 struct sock *sk = skb->sk;
3531 int queue_index = sk_tx_queue_get(sk);
3532
3533 if (queue_index < 0 || skb->ooo_okay ||
3534 queue_index >= dev->real_num_tx_queues) {
3535 int new_index = get_xps_queue(dev, skb);
f4563a75 3536
638b2a69
JP
3537 if (new_index < 0)
3538 new_index = skb_tx_hash(dev, skb);
3539
3540 if (queue_index != new_index && sk &&
004a5d01 3541 sk_fullsock(sk) &&
638b2a69
JP
3542 rcu_access_pointer(sk->sk_dst_cache))
3543 sk_tx_queue_set(sk, new_index);
3544
3545 queue_index = new_index;
3546 }
3547
3548 return queue_index;
3549}
3550
3551struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3552 struct sk_buff *skb,
3553 void *accel_priv)
3554{
3555 int queue_index = 0;
3556
3557#ifdef CONFIG_XPS
52bd2d62
ED
3558 u32 sender_cpu = skb->sender_cpu - 1;
3559
3560 if (sender_cpu >= (u32)NR_CPUS)
638b2a69
JP
3561 skb->sender_cpu = raw_smp_processor_id() + 1;
3562#endif
3563
3564 if (dev->real_num_tx_queues != 1) {
3565 const struct net_device_ops *ops = dev->netdev_ops;
f4563a75 3566
638b2a69
JP
3567 if (ops->ndo_select_queue)
3568 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3569 __netdev_pick_tx);
3570 else
3571 queue_index = __netdev_pick_tx(dev, skb);
3572
d584527c 3573 queue_index = netdev_cap_txqueue(dev, queue_index);
638b2a69
JP
3574 }
3575
3576 skb_set_queue_mapping(skb, queue_index);
3577 return netdev_get_tx_queue(dev, queue_index);
3578}
3579
d29f749e 3580/**
9d08dd3d 3581 * __dev_queue_xmit - transmit a buffer
d29f749e 3582 * @skb: buffer to transmit
9d08dd3d 3583 * @accel_priv: private data used for L2 forwarding offload
3584 *
3585 * Queue a buffer for transmission to a network device. The caller must
3586 * have set the device and priority and built the buffer before calling
3587 * this function. The function can be called from an interrupt.
3588 *
3589 * A negative errno code is returned on a failure. A success does not
3590 * guarantee the frame will be transmitted as it may be dropped due
3591 * to congestion or traffic shaping.
3592 *
3593 * -----------------------------------------------------------------------------------
3594 * I notice this method can also return errors from the queue disciplines,
3595 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3596 * be positive.
3597 *
3598 * Regardless of the return value, the skb is consumed, so it is currently
3599 * difficult to retry a send to this method. (You can bump the ref count
3600 * before sending to hold a reference for retry if you are careful.)
3601 *
3602 * When calling this method, interrupts MUST be enabled. This is because
3603 * the BH enable code must have IRQs enabled so that it will not deadlock.
3604 * --BLG
3605 */
0a59f3a9 3606static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3607{
3608 struct net_device *dev = skb->dev;
dc2b4847 3609 struct netdev_queue *txq;
1da177e4
LT
3610 struct Qdisc *q;
3611 int rc = -ENOMEM;
f53c7239 3612 bool again = false;
1da177e4 3613
6d1ccff6
ED
3614 skb_reset_mac_header(skb);
3615
e7fd2885
WB
3616 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3617 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3618
4ec93edb
YH
3619 /* Disable soft irqs for various locks below. Also
3620 * stops preemption for RCU.
1da177e4 3621 */
4ec93edb 3622 rcu_read_lock_bh();
1da177e4 3623
5bc1421e
NH
3624 skb_update_prio(skb);
3625
1f211a1b
DB
3626 qdisc_pkt_len_init(skb);
3627#ifdef CONFIG_NET_CLS_ACT
8dc07fdb 3628 skb->tc_at_ingress = 0;
1f211a1b 3629# ifdef CONFIG_NET_EGRESS
aabf6772 3630 if (static_branch_unlikely(&egress_needed_key)) {
1f211a1b
DB
3631 skb = sch_handle_egress(skb, &rc, dev);
3632 if (!skb)
3633 goto out;
3634 }
3635# endif
3636#endif
02875878
ED
3637 /* If device/qdisc don't need skb->dst, release it right now while
3638 * it's hot in this CPU cache.
3639 */
3640 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3641 skb_dst_drop(skb);
3642 else
3643 skb_dst_force(skb);
3644
f663dd9a 3645 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3646 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3647
cf66ba58 3648 trace_net_dev_queue(skb);
1da177e4 3649 if (q->enqueue) {
bbd8a0d3 3650 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3651 goto out;
1da177e4
LT
3652 }
3653
3654 /* The device has no queue. Common case for software devices:
3655 * loopback, all the sorts of tunnels...
3656 *
3657 * Really, it is unlikely that netif_tx_lock protection is necessary
3658 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
3659 * counters.)
3660 * However, it is possible that they rely on the protection
3661 * made by us here.
3662 *
3663 * Check this and take the lock. It is not prone to deadlocks.
3664 * Either way, taking it for the noqueue qdisc is even simpler 8)
3665 */
3666 if (dev->flags & IFF_UP) {
3667 int cpu = smp_processor_id(); /* ok because BHs are off */
3668
c773e847 3669 if (txq->xmit_lock_owner != cpu) {
a70b506e
DB
3670 if (unlikely(__this_cpu_read(xmit_recursion) >
3671 XMIT_RECURSION_LIMIT))
745e20f1
ED
3672 goto recursion_alert;
3673
f53c7239 3674 skb = validate_xmit_skb(skb, dev, &again);
1f59533f 3675 if (!skb)
d21fd63e 3676 goto out;
1f59533f 3677
c773e847 3678 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3679
73466498 3680 if (!netif_xmit_stopped(txq)) {
745e20f1 3681 __this_cpu_inc(xmit_recursion);
ce93718f 3682 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3683 __this_cpu_dec(xmit_recursion);
572a9d7b 3684 if (dev_xmit_complete(rc)) {
c773e847 3685 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3686 goto out;
3687 }
3688 }
c773e847 3689 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3690 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3691 dev->name);
1da177e4
LT
3692 } else {
3693 /* Recursion is detected! It is possible,
3694 * unfortunately
3695 */
3696recursion_alert:
e87cc472
JP
3697 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3698 dev->name);
1da177e4
LT
3699 }
3700 }
3701
3702 rc = -ENETDOWN;
d4828d85 3703 rcu_read_unlock_bh();
1da177e4 3704
015f0688 3705 atomic_long_inc(&dev->tx_dropped);
1f59533f 3706 kfree_skb_list(skb);
1da177e4
LT
3707 return rc;
3708out:
d4828d85 3709 rcu_read_unlock_bh();
1da177e4
LT
3710 return rc;
3711}
f663dd9a 3712
2b4aa3ce 3713int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
3714{
3715 return __dev_queue_xmit(skb, NULL);
3716}
2b4aa3ce 3717EXPORT_SYMBOL(dev_queue_xmit);
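/*
 * Illustrative sketch (not part of this file): a caller honouring the
 * contract documented above for __dev_queue_xmit()/dev_queue_xmit() - the
 * frame is fully built, skb->dev and skb->priority are set before the call,
 * and the skb is consumed whatever the return value. example_send_raw_frame()
 * and the ETH_P_802_EX1 payload are hypothetical; the helpers used are
 * standard kernel APIs.
 */
static int example_send_raw_frame(struct net_device *dev, const u8 *dst_mac,
				  const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	/* netdev_alloc_skb() already sets skb->dev = dev for us. */
	skb = netdev_alloc_skb(dev, LL_RESERVED_SPACE(dev) + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, payload, len);

	skb->priority = 0;
	skb->protocol = htons(ETH_P_802_EX1);

	/* Push the link-layer header in front of the payload. */
	if (dev_hard_header(skb, dev, ETH_P_802_EX1, dst_mac,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* May also return positive NET_XMIT_* codes from the qdisc. */
	return dev_queue_xmit(skb);
}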
1da177e4 3718
f663dd9a
JW
3719int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3720{
3721 return __dev_queue_xmit(skb, accel_priv);
3722}
3723EXPORT_SYMBOL(dev_queue_xmit_accel);
3724
865b03f2
MK
3725int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3726{
3727 struct net_device *dev = skb->dev;
3728 struct sk_buff *orig_skb = skb;
3729 struct netdev_queue *txq;
3730 int ret = NETDEV_TX_BUSY;
3731 bool again = false;
3732
3733 if (unlikely(!netif_running(dev) ||
3734 !netif_carrier_ok(dev)))
3735 goto drop;
3736
3737 skb = validate_xmit_skb_list(skb, dev, &again);
3738 if (skb != orig_skb)
3739 goto drop;
3740
3741 skb_set_queue_mapping(skb, queue_id);
3742 txq = skb_get_tx_queue(dev, skb);
3743
3744 local_bh_disable();
3745
3746 HARD_TX_LOCK(dev, txq, smp_processor_id());
3747 if (!netif_xmit_frozen_or_drv_stopped(txq))
3748 ret = netdev_start_xmit(skb, dev, txq, false);
3749 HARD_TX_UNLOCK(dev, txq);
3750
3751 local_bh_enable();
3752
3753 if (!dev_xmit_complete(ret))
3754 kfree_skb(skb);
3755
3756 return ret;
3757drop:
3758 atomic_long_inc(&dev->tx_dropped);
3759 kfree_skb_list(skb);
3760 return NET_XMIT_DROP;
3761}
3762EXPORT_SYMBOL(dev_direct_xmit);
1da177e4 3763
eb13da1a 3764/*************************************************************************
3765 * Receiver routines
3766 *************************************************************************/
1da177e4 3767
6b2bedc3 3768int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3769EXPORT_SYMBOL(netdev_max_backlog);
3770
3b098e2d 3771int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3 3772int netdev_budget __read_mostly = 300;
7acf8a1e 3773unsigned int __read_mostly netdev_budget_usecs = 2000;
3d48b53f
MT
3774int weight_p __read_mostly = 64; /* old backlog weight */
3775int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3776int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3777int dev_rx_weight __read_mostly = 64;
3778int dev_tx_weight __read_mostly = 64;
1da177e4 3779
eecfd7c4
ED
3780/* Called with irq disabled */
3781static inline void ____napi_schedule(struct softnet_data *sd,
3782 struct napi_struct *napi)
3783{
3784 list_add_tail(&napi->poll_list, &sd->poll_list);
3785 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3786}
3787
bfb564e7
KK
3788#ifdef CONFIG_RPS
3789
3790/* One global table that all flow-based protocols share. */
6e3f7faf 3791struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3792EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3793u32 rps_cpu_mask __read_mostly;
3794EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3795
c5905afb 3796struct static_key rps_needed __read_mostly;
3df97ba8 3797EXPORT_SYMBOL(rps_needed);
13bfff25
ED
3798struct static_key rfs_needed __read_mostly;
3799EXPORT_SYMBOL(rfs_needed);
adc9300e 3800
c445477d
BH
3801static struct rps_dev_flow *
3802set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3803 struct rps_dev_flow *rflow, u16 next_cpu)
3804{
a31196b0 3805 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3806#ifdef CONFIG_RFS_ACCEL
3807 struct netdev_rx_queue *rxqueue;
3808 struct rps_dev_flow_table *flow_table;
3809 struct rps_dev_flow *old_rflow;
3810 u32 flow_id;
3811 u16 rxq_index;
3812 int rc;
3813
3814 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3815 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3816 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3817 goto out;
3818 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3819 if (rxq_index == skb_get_rx_queue(skb))
3820 goto out;
3821
3822 rxqueue = dev->_rx + rxq_index;
3823 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3824 if (!flow_table)
3825 goto out;
61b905da 3826 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3827 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3828 rxq_index, flow_id);
3829 if (rc < 0)
3830 goto out;
3831 old_rflow = rflow;
3832 rflow = &flow_table->flows[flow_id];
c445477d
BH
3833 rflow->filter = rc;
3834 if (old_rflow->filter == rflow->filter)
3835 old_rflow->filter = RPS_NO_FILTER;
3836 out:
3837#endif
3838 rflow->last_qtail =
09994d1b 3839 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3840 }
3841
09994d1b 3842 rflow->cpu = next_cpu;
c445477d
BH
3843 return rflow;
3844}
3845
bfb564e7
KK
3846/*
3847 * get_rps_cpu is called from netif_receive_skb and returns the target
3848 * CPU from the RPS map of the receiving queue for a given skb.
3849 * rcu_read_lock must be held on entry.
3850 */
3851static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3852 struct rps_dev_flow **rflowp)
3853{
567e4b79
ED
3854 const struct rps_sock_flow_table *sock_flow_table;
3855 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3856 struct rps_dev_flow_table *flow_table;
567e4b79 3857 struct rps_map *map;
bfb564e7 3858 int cpu = -1;
567e4b79 3859 u32 tcpu;
61b905da 3860 u32 hash;
bfb564e7
KK
3861
3862 if (skb_rx_queue_recorded(skb)) {
3863 u16 index = skb_get_rx_queue(skb);
567e4b79 3864
62fe0b40
BH
3865 if (unlikely(index >= dev->real_num_rx_queues)) {
3866 WARN_ONCE(dev->real_num_rx_queues > 1,
3867 "%s received packet on queue %u, but number "
3868 "of RX queues is %u\n",
3869 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3870 goto done;
3871 }
567e4b79
ED
3872 rxqueue += index;
3873 }
bfb564e7 3874
567e4b79
ED
3875 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3876
3877 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3878 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3879 if (!flow_table && !map)
bfb564e7
KK
3880 goto done;
3881
2d47b459 3882 skb_reset_network_header(skb);
61b905da
TH
3883 hash = skb_get_hash(skb);
3884 if (!hash)
bfb564e7
KK
3885 goto done;
3886
fec5e652
TH
3887 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3888 if (flow_table && sock_flow_table) {
fec5e652 3889 struct rps_dev_flow *rflow;
567e4b79
ED
3890 u32 next_cpu;
3891 u32 ident;
3892
3893 /* First check into global flow table if there is a match */
3894 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3895 if ((ident ^ hash) & ~rps_cpu_mask)
3896 goto try_rps;
fec5e652 3897
567e4b79
ED
3898 next_cpu = ident & rps_cpu_mask;
3899
3900 /* OK, now we know there is a match,
3901 * we can look at the local (per receive queue) flow table
3902 */
61b905da 3903 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3904 tcpu = rflow->cpu;
3905
fec5e652
TH
3906 /*
3907 * If the desired CPU (where last recvmsg was done) is
3908 * different from current CPU (one in the rx-queue flow
3909 * table entry), switch if one of the following holds:
a31196b0 3910 * - Current CPU is unset (>= nr_cpu_ids).
3911 * - Current CPU is offline.
3912 * - The current CPU's queue tail has advanced beyond the
3913 * last packet that was enqueued using this table entry.
3914 * This guarantees that all previous packets for the flow
3915 * have been dequeued, thus preserving in order delivery.
3916 */
3917 if (unlikely(tcpu != next_cpu) &&
a31196b0 3918 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3919 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3920 rflow->last_qtail)) >= 0)) {
3921 tcpu = next_cpu;
c445477d 3922 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3923 }
c445477d 3924
a31196b0 3925 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3926 *rflowp = rflow;
3927 cpu = tcpu;
3928 goto done;
3929 }
3930 }
3931
567e4b79
ED
3932try_rps:
3933
0a9627f2 3934 if (map) {
8fc54f68 3935 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3936 if (cpu_online(tcpu)) {
3937 cpu = tcpu;
3938 goto done;
3939 }
3940 }
3941
3942done:
0a9627f2
TH
3943 return cpu;
3944}
3945
c445477d
BH
3946#ifdef CONFIG_RFS_ACCEL
3947
3948/**
3949 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3950 * @dev: Device on which the filter was set
3951 * @rxq_index: RX queue index
3952 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3953 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3954 *
3955 * Drivers that implement ndo_rx_flow_steer() should periodically call
3956 * this function for each installed filter and remove the filters for
3957 * which it returns %true.
3958 */
3959bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3960 u32 flow_id, u16 filter_id)
3961{
3962 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3963 struct rps_dev_flow_table *flow_table;
3964 struct rps_dev_flow *rflow;
3965 bool expire = true;
a31196b0 3966 unsigned int cpu;
c445477d
BH
3967
3968 rcu_read_lock();
3969 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3970 if (flow_table && flow_id <= flow_table->mask) {
3971 rflow = &flow_table->flows[flow_id];
6aa7de05 3972 cpu = READ_ONCE(rflow->cpu);
a31196b0 3973 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3974 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3975 rflow->last_qtail) <
3976 (int)(10 * flow_table->mask)))
3977 expire = false;
3978 }
3979 rcu_read_unlock();
3980 return expire;
3981}
3982EXPORT_SYMBOL(rps_may_expire_flow);
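/*
 * Illustrative sketch (not from any real driver): the periodic expiry scan
 * that the kernel-doc above asks ndo_rx_flow_steer() implementers to run.
 * struct example_filter and the table are hypothetical driver state; only
 * rps_may_expire_flow() itself is the real API.
 */
struct example_filter {
	u32 flow_id;		/* flow_id passed to ndo_rx_flow_steer() */
	u16 filter_id;		/* value returned by ndo_rx_flow_steer() */
	u16 rxq_index;
	bool in_use;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *tbl, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* device specific: remove the hardware filter here */
			tbl[i].in_use = false;
		}
	}
}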
3983
3984#endif /* CONFIG_RFS_ACCEL */
3985
0a9627f2 3986/* Called from hardirq (IPI) context */
e36fa2f7 3987static void rps_trigger_softirq(void *data)
0a9627f2 3988{
e36fa2f7
ED
3989 struct softnet_data *sd = data;
3990
eecfd7c4 3991 ____napi_schedule(sd, &sd->backlog);
dee42870 3992 sd->received_rps++;
0a9627f2 3993}
e36fa2f7 3994
fec5e652 3995#endif /* CONFIG_RPS */
0a9627f2 3996
e36fa2f7
ED
3997/*
3998 * Check if this softnet_data structure belongs to another CPU.
3999 * If yes, queue it to our IPI list and return 1.
4000 * If no, return 0.
4001 */
4002static int rps_ipi_queued(struct softnet_data *sd)
4003{
4004#ifdef CONFIG_RPS
903ceff7 4005 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
4006
4007 if (sd != mysd) {
4008 sd->rps_ipi_next = mysd->rps_ipi_list;
4009 mysd->rps_ipi_list = sd;
4010
4011 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4012 return 1;
4013 }
4014#endif /* CONFIG_RPS */
4015 return 0;
4016}
4017
99bbc707
WB
4018#ifdef CONFIG_NET_FLOW_LIMIT
4019int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4020#endif
4021
4022static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4023{
4024#ifdef CONFIG_NET_FLOW_LIMIT
4025 struct sd_flow_limit *fl;
4026 struct softnet_data *sd;
4027 unsigned int old_flow, new_flow;
4028
4029 if (qlen < (netdev_max_backlog >> 1))
4030 return false;
4031
903ceff7 4032 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
4033
4034 rcu_read_lock();
4035 fl = rcu_dereference(sd->flow_limit);
4036 if (fl) {
3958afa1 4037 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
4038 old_flow = fl->history[fl->history_head];
4039 fl->history[fl->history_head] = new_flow;
4040
4041 fl->history_head++;
4042 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4043
4044 if (likely(fl->buckets[old_flow]))
4045 fl->buckets[old_flow]--;
4046
4047 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4048 fl->count++;
4049 rcu_read_unlock();
4050 return true;
4051 }
4052 }
4053 rcu_read_unlock();
4054#endif
4055 return false;
4056}
4057
0a9627f2
TH
4058/*
4059 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4060 * queue (may be a remote CPU queue).
4061 */
fec5e652
TH
4062static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4063 unsigned int *qtail)
0a9627f2 4064{
e36fa2f7 4065 struct softnet_data *sd;
0a9627f2 4066 unsigned long flags;
99bbc707 4067 unsigned int qlen;
0a9627f2 4068
e36fa2f7 4069 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
4070
4071 local_irq_save(flags);
0a9627f2 4072
e36fa2f7 4073 rps_lock(sd);
e9e4dd32
JA
4074 if (!netif_running(skb->dev))
4075 goto drop;
99bbc707
WB
4076 qlen = skb_queue_len(&sd->input_pkt_queue);
4077 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 4078 if (qlen) {
0a9627f2 4079enqueue:
e36fa2f7 4080 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 4081 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 4082 rps_unlock(sd);
152102c7 4083 local_irq_restore(flags);
0a9627f2
TH
4084 return NET_RX_SUCCESS;
4085 }
4086
ebda37c2
ED
4087 /* Schedule NAPI for backlog device
4088 * We can use non atomic operation since we own the queue lock
4089 */
4090 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 4091 if (!rps_ipi_queued(sd))
eecfd7c4 4092 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
4093 }
4094 goto enqueue;
4095 }
4096
e9e4dd32 4097drop:
dee42870 4098 sd->dropped++;
e36fa2f7 4099 rps_unlock(sd);
0a9627f2 4100
0a9627f2
TH
4101 local_irq_restore(flags);
4102
caf586e5 4103 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
4104 kfree_skb(skb);
4105 return NET_RX_DROP;
4106}
1da177e4 4107
e817f856
JDB
4108static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4109{
4110 struct net_device *dev = skb->dev;
4111 struct netdev_rx_queue *rxqueue;
4112
4113 rxqueue = dev->_rx;
4114
4115 if (skb_rx_queue_recorded(skb)) {
4116 u16 index = skb_get_rx_queue(skb);
4117
4118 if (unlikely(index >= dev->real_num_rx_queues)) {
4119 WARN_ONCE(dev->real_num_rx_queues > 1,
4120 "%s received packet on queue %u, but number "
4121 "of RX queues is %u\n",
4122 dev->name, index, dev->real_num_rx_queues);
4123
4124 return rxqueue; /* Return first rxqueue */
4125 }
4126 rxqueue += index;
4127 }
4128 return rxqueue;
4129}
4130
d4455169 4131static u32 netif_receive_generic_xdp(struct sk_buff *skb,
02671e23 4132 struct xdp_buff *xdp,
d4455169
JF
4133 struct bpf_prog *xdp_prog)
4134{
e817f856 4135 struct netdev_rx_queue *rxqueue;
198d83bb 4136 void *orig_data, *orig_data_end;
de8f3a83 4137 u32 metalen, act = XDP_DROP;
d4455169
JF
4138 int hlen, off;
4139 u32 mac_len;
4140
4141 /* Reinjected packets coming from act_mirred or similar should
4142 * not get XDP generic processing.
4143 */
4144 if (skb_cloned(skb))
4145 return XDP_PASS;
4146
de8f3a83
DB
4147 /* XDP packets must be linear and must have sufficient headroom
4148 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4149 * native XDP provides, thus we need to do it here as well.
4150 */
4151 if (skb_is_nonlinear(skb) ||
4152 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4153 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4154 int troom = skb->tail + skb->data_len - skb->end;
4155
4156 /* In case we have to go down the path and also linearize,
4157 * then let's do the pskb_expand_head() work just once here.
4158 */
4159 if (pskb_expand_head(skb,
4160 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4161 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4162 goto do_drop;
2d17d8d7 4163 if (skb_linearize(skb))
de8f3a83
DB
4164 goto do_drop;
4165 }
d4455169
JF
4166
4167 /* The XDP program wants to see the packet starting at the MAC
4168 * header.
4169 */
4170 mac_len = skb->data - skb_mac_header(skb);
4171 hlen = skb_headlen(skb) + mac_len;
02671e23
BT
4172 xdp->data = skb->data - mac_len;
4173 xdp->data_meta = xdp->data;
4174 xdp->data_end = xdp->data + hlen;
4175 xdp->data_hard_start = skb->data - skb_headroom(skb);
4176 orig_data_end = xdp->data_end;
4177 orig_data = xdp->data;
d4455169 4178
e817f856 4179 rxqueue = netif_get_rxqueue(skb);
02671e23 4180 xdp->rxq = &rxqueue->xdp_rxq;
e817f856 4181
02671e23 4182 act = bpf_prog_run_xdp(xdp_prog, xdp);
d4455169 4183
02671e23 4184 off = xdp->data - orig_data;
d4455169
JF
4185 if (off > 0)
4186 __skb_pull(skb, off);
4187 else if (off < 0)
4188 __skb_push(skb, -off);
92dd5452 4189 skb->mac_header += off;
d4455169 4190
198d83bb
NS
4191 /* Check if bpf_xdp_adjust_tail was used; it can only "shrink"
4192 * the packet.
4193 */
02671e23 4194 off = orig_data_end - xdp->data_end;
f7613120 4195 if (off != 0) {
02671e23 4196 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
f7613120 4197 skb->len -= off;
02671e23 4198
f7613120 4199 }
198d83bb 4200
d4455169 4201 switch (act) {
6103aa96 4202 case XDP_REDIRECT:
d4455169
JF
4203 case XDP_TX:
4204 __skb_push(skb, mac_len);
de8f3a83 4205 break;
d4455169 4206 case XDP_PASS:
02671e23 4207 metalen = xdp->data - xdp->data_meta;
de8f3a83
DB
4208 if (metalen)
4209 skb_metadata_set(skb, metalen);
d4455169 4210 break;
d4455169
JF
4211 default:
4212 bpf_warn_invalid_xdp_action(act);
4213 /* fall through */
4214 case XDP_ABORTED:
4215 trace_xdp_exception(skb->dev, xdp_prog, act);
4216 /* fall through */
4217 case XDP_DROP:
4218 do_drop:
4219 kfree_skb(skb);
4220 break;
4221 }
4222
4223 return act;
4224}
4225
4226/* When doing generic XDP we have to bypass the qdisc layer and the
4227 * network taps in order to match in-driver-XDP behavior.
4228 */
7c497478 4229void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
d4455169
JF
4230{
4231 struct net_device *dev = skb->dev;
4232 struct netdev_queue *txq;
4233 bool free_skb = true;
4234 int cpu, rc;
4235
4236 txq = netdev_pick_tx(dev, skb, NULL);
4237 cpu = smp_processor_id();
4238 HARD_TX_LOCK(dev, txq, cpu);
4239 if (!netif_xmit_stopped(txq)) {
4240 rc = netdev_start_xmit(skb, dev, txq, 0);
4241 if (dev_xmit_complete(rc))
4242 free_skb = false;
4243 }
4244 HARD_TX_UNLOCK(dev, txq);
4245 if (free_skb) {
4246 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4247 kfree_skb(skb);
4248 }
4249}
7c497478 4250EXPORT_SYMBOL_GPL(generic_xdp_tx);
d4455169 4251
02786475 4252static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
d4455169 4253
7c497478 4254int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
d4455169 4255{
d4455169 4256 if (xdp_prog) {
02671e23
BT
4257 struct xdp_buff xdp;
4258 u32 act;
6103aa96 4259 int err;
d4455169 4260
02671e23 4261 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
d4455169 4262 if (act != XDP_PASS) {
6103aa96
JF
4263 switch (act) {
4264 case XDP_REDIRECT:
2facaad6 4265 err = xdp_do_generic_redirect(skb->dev, skb,
02671e23 4266 &xdp, xdp_prog);
6103aa96
JF
4267 if (err)
4268 goto out_redir;
02671e23 4269 break;
6103aa96 4270 case XDP_TX:
d4455169 4271 generic_xdp_tx(skb, xdp_prog);
6103aa96
JF
4272 break;
4273 }
d4455169
JF
4274 return XDP_DROP;
4275 }
4276 }
4277 return XDP_PASS;
6103aa96 4278out_redir:
6103aa96
JF
4279 kfree_skb(skb);
4280 return XDP_DROP;
d4455169 4281}
7c497478 4282EXPORT_SYMBOL_GPL(do_xdp_generic);
d4455169 4283
ae78dbfa 4284static int netif_rx_internal(struct sk_buff *skb)
1da177e4 4285{
b0e28f1e 4286 int ret;
1da177e4 4287
588f0330 4288 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 4289
cf66ba58 4290 trace_netif_rx(skb);
d4455169 4291
02786475 4292 if (static_branch_unlikely(&generic_xdp_needed_key)) {
bbbe211c
JF
4293 int ret;
4294
4295 preempt_disable();
4296 rcu_read_lock();
4297 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4298 rcu_read_unlock();
4299 preempt_enable();
d4455169 4300
6103aa96
JF
4301 /* Consider XDP consuming the packet a success from
4302 * the netdev point of view; we do not want to count
4303 * this as an error.
4304 */
d4455169 4305 if (ret != XDP_PASS)
6103aa96 4306 return NET_RX_SUCCESS;
d4455169
JF
4307 }
4308
df334545 4309#ifdef CONFIG_RPS
c5905afb 4310 if (static_key_false(&rps_needed)) {
fec5e652 4311 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
4312 int cpu;
4313
cece1945 4314 preempt_disable();
b0e28f1e 4315 rcu_read_lock();
fec5e652
TH
4316
4317 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
4318 if (cpu < 0)
4319 cpu = smp_processor_id();
fec5e652
TH
4320
4321 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4322
b0e28f1e 4323 rcu_read_unlock();
cece1945 4324 preempt_enable();
adc9300e
ED
4325 } else
4326#endif
fec5e652
TH
4327 {
4328 unsigned int qtail;
f4563a75 4329
fec5e652
TH
4330 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4331 put_cpu();
4332 }
b0e28f1e 4333 return ret;
1da177e4 4334}
ae78dbfa
BH
4335
4336/**
4337 * netif_rx - post buffer to the network code
4338 * @skb: buffer to post
4339 *
4340 * This function receives a packet from a device driver and queues it for
4341 * the upper (protocol) levels to process. It always succeeds. The buffer
4342 * may be dropped during processing for congestion control or by the
4343 * protocol layers.
4344 *
4345 * return values:
4346 * NET_RX_SUCCESS (no congestion)
4347 * NET_RX_DROP (packet was dropped)
4348 *
4349 */
4350
4351int netif_rx(struct sk_buff *skb)
4352{
4353 trace_netif_rx_entry(skb);
4354
4355 return netif_rx_internal(skb);
4356}
d1b19dff 4357EXPORT_SYMBOL(netif_rx);
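/*
 * Illustrative sketch (not part of this file): the classic non-NAPI receive
 * path the kernel-doc above describes - a driver builds an skb for a received
 * frame and hands it to netif_rx(). example_rx_one_frame() and the buffer
 * handling are hypothetical; the skb helpers are standard kernel APIs.
 */
static void example_rx_one_frame(struct net_device *dev,
				 const void *buf, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put_data(skb, buf, len);

	/* eth_type_trans() sets skb->dev and the packet type for us. */
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	netif_rx(skb);		/* always consumes the skb */
}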
1da177e4
LT
4358
4359int netif_rx_ni(struct sk_buff *skb)
4360{
4361 int err;
4362
ae78dbfa
BH
4363 trace_netif_rx_ni_entry(skb);
4364
1da177e4 4365 preempt_disable();
ae78dbfa 4366 err = netif_rx_internal(skb);
1da177e4
LT
4367 if (local_softirq_pending())
4368 do_softirq();
4369 preempt_enable();
4370
4371 return err;
4372}
1da177e4
LT
4373EXPORT_SYMBOL(netif_rx_ni);
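/*
 * Illustrative sketch (not part of this file): netif_rx_ni() is the variant
 * for callers running in process context, e.g. a virtual device injecting a
 * packet it received from user space. example_inject() is hypothetical; the
 * pattern loosely mirrors what tun-like drivers do.
 */
static int example_inject(struct net_device *dev, const void *buf,
			  unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return -ENOMEM;
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);

	return netif_rx_ni(skb);	/* safe in process context */
}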
4374
0766f788 4375static __latent_entropy void net_tx_action(struct softirq_action *h)
1da177e4 4376{
903ceff7 4377 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
4378
4379 if (sd->completion_queue) {
4380 struct sk_buff *clist;
4381
4382 local_irq_disable();
4383 clist = sd->completion_queue;
4384 sd->completion_queue = NULL;
4385 local_irq_enable();
4386
4387 while (clist) {
4388 struct sk_buff *skb = clist;
f4563a75 4389
1da177e4
LT
4390 clist = clist->next;
4391
63354797 4392 WARN_ON(refcount_read(&skb->users));
e6247027
ED
4393 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4394 trace_consume_skb(skb);
4395 else
4396 trace_kfree_skb(skb, net_tx_action);
15fad714
JDB
4397
4398 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4399 __kfree_skb(skb);
4400 else
4401 __kfree_skb_defer(skb);
1da177e4 4402 }
15fad714
JDB
4403
4404 __kfree_skb_flush();
1da177e4
LT
4405 }
4406
4407 if (sd->output_queue) {
37437bb2 4408 struct Qdisc *head;
1da177e4
LT
4409
4410 local_irq_disable();
4411 head = sd->output_queue;
4412 sd->output_queue = NULL;
a9cbd588 4413 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
4414 local_irq_enable();
4415
4416 while (head) {
37437bb2 4417 struct Qdisc *q = head;
6b3ba914 4418 spinlock_t *root_lock = NULL;
37437bb2 4419
1da177e4
LT
4420 head = head->next_sched;
4421
6b3ba914
JF
4422 if (!(q->flags & TCQ_F_NOLOCK)) {
4423 root_lock = qdisc_lock(q);
4424 spin_lock(root_lock);
4425 }
3bcb846c
ED
4426 /* We need to make sure head->next_sched is read
4427 * before clearing __QDISC_STATE_SCHED
4428 */
4429 smp_mb__before_atomic();
4430 clear_bit(__QDISC_STATE_SCHED, &q->state);
4431 qdisc_run(q);
6b3ba914
JF
4432 if (root_lock)
4433 spin_unlock(root_lock);
1da177e4
LT
4434 }
4435 }
f53c7239
SK
4436
4437 xfrm_dev_backlog(sd);
1da177e4
LT
4438}
4439
181402a5 4440#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
da678292
MM
4441/* This hook is defined here for ATM LANE */
4442int (*br_fdb_test_addr_hook)(struct net_device *dev,
4443 unsigned char *addr) __read_mostly;
4fb019a0 4444EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 4445#endif
1da177e4 4446
1f211a1b
DB
4447static inline struct sk_buff *
4448sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4449 struct net_device *orig_dev)
f697c3e8 4450{
e7582bab 4451#ifdef CONFIG_NET_CLS_ACT
46209401 4452 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
d2788d34 4453 struct tcf_result cl_res;
24824a09 4454
c9e99fd0
DB
4455 /* If there's at least one ingress present somewhere (so
4456 * we get here via enabled static key), remaining devices
4457 * that are not configured with an ingress qdisc will bail
d2788d34 4458 * out here.
c9e99fd0 4459 */
46209401 4460 if (!miniq)
4577139b 4461 return skb;
46209401 4462
f697c3e8
HX
4463 if (*pt_prev) {
4464 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4465 *pt_prev = NULL;
1da177e4
LT
4466 }
4467
3365495c 4468 qdisc_skb_cb(skb)->pkt_len = skb->len;
8dc07fdb 4469 skb->tc_at_ingress = 1;
46209401 4470 mini_qdisc_bstats_cpu_update(miniq, skb);
c9e99fd0 4471
46209401 4472 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
d2788d34
DB
4473 case TC_ACT_OK:
4474 case TC_ACT_RECLASSIFY:
4475 skb->tc_index = TC_H_MIN(cl_res.classid);
4476 break;
4477 case TC_ACT_SHOT:
46209401 4478 mini_qdisc_qstats_cpu_drop(miniq);
8a3a4c6e
ED
4479 kfree_skb(skb);
4480 return NULL;
d2788d34
DB
4481 case TC_ACT_STOLEN:
4482 case TC_ACT_QUEUED:
e25ea21f 4483 case TC_ACT_TRAP:
8a3a4c6e 4484 consume_skb(skb);
d2788d34 4485 return NULL;
27b29f63
AS
4486 case TC_ACT_REDIRECT:
4487 /* skb_mac_header check was done by cls/act_bpf, so
4488 * we can safely push the L2 header back before
4489 * redirecting to another netdev
4490 */
4491 __skb_push(skb, skb->mac_len);
4492 skb_do_redirect(skb);
4493 return NULL;
d2788d34
DB
4494 default:
4495 break;
f697c3e8 4496 }
e7582bab 4497#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
4498 return skb;
4499}
1da177e4 4500
4501 /**
4502  * netdev_is_rx_handler_busy - check if receive handler is registered
4503  * @dev: device to check
4504  *
4505  * Check if a receive handler is already registered for a given device.
4506  * Return true if there is one.
4507  *
4508  * The caller must hold the rtnl_mutex.
4509  */
4510bool netdev_is_rx_handler_busy(struct net_device *dev)
4511{
4512 ASSERT_RTNL();
4513 return dev && rtnl_dereference(dev->rx_handler);
4514}
4515EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4516
4517 /**
4518  * netdev_rx_handler_register - register receive handler
4519  * @dev: device to register a handler for
4520  * @rx_handler: receive handler to register
4521  * @rx_handler_data: data pointer that is used by rx handler
4522  *
4523  * Register a receive handler for a device. This handler will then be
4524  * called from __netif_receive_skb. A negative errno code is returned
4525  * on a failure.
4526  *
4527  * The caller must hold the rtnl_mutex.
4528  *
4529  * For a general description of rx_handler, see enum rx_handler_result.
4530  */
4531int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
4532 rx_handler_func_t *rx_handler,
4533 void *rx_handler_data)
ab95bfe0 4534{
1b7cd004 4535 if (netdev_is_rx_handler_busy(dev))
ab95bfe0
JP
4536 return -EBUSY;
4537
f5426250
PA
4538 if (dev->priv_flags & IFF_NO_RX_HANDLER)
4539 return -EINVAL;
4540
00cfec37 4541 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 4542 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
4543 rcu_assign_pointer(dev->rx_handler, rx_handler);
4544
4545 return 0;
4546}
4547EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
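/*
 * Illustrative sketch (not part of this file): registering a receive handler
 * under RTNL, following the kernel-doc above. example_rx_handler() and
 * struct example_port are hypothetical; rx_handler_result_t, the RX_HANDLER_*
 * codes and the rtnl/rcu rules are the real contract.
 */
struct example_port {
	struct net_device *dev;
};

static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* Inspect, steal or redirect the skb here; see enum rx_handler_result. */
	(void)port;
	return RX_HANDLER_PASS;
}

static int example_attach_port(struct net_device *dev, struct example_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, port);
	rtnl_unlock();
	return err;		/* -EBUSY if another handler is already attached */
}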
4548
4549 /**
4550  * netdev_rx_handler_unregister - unregister receive handler
4551  * @dev: device to unregister a handler from
4552  *
4553  * Unregister a receive handler from a device.
4554  *
4555  * The caller must hold the rtnl_mutex.
4556  */
4557void netdev_rx_handler_unregister(struct net_device *dev)
4558{
4559
4560 ASSERT_RTNL();
a9b3cd7f 4561 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
4562 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
4563 * section is guaranteed to see a non NULL rx_handler_data
4564 * as well.
4565 */
4566 synchronize_net();
a9b3cd7f 4567 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
4568}
4569EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4570
b4b9e355
MG
4571/*
4572 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4573 * the special handling of PFMEMALLOC skbs.
4574 */
4575static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4576{
4577 switch (skb->protocol) {
2b8837ae
JP
4578 case htons(ETH_P_ARP):
4579 case htons(ETH_P_IP):
4580 case htons(ETH_P_IPV6):
4581 case htons(ETH_P_8021Q):
4582 case htons(ETH_P_8021AD):
b4b9e355
MG
4583 return true;
4584 default:
4585 return false;
4586 }
4587}
4588
e687ad60
PN
4589static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4590 int *ret, struct net_device *orig_dev)
4591{
e7582bab 4592#ifdef CONFIG_NETFILTER_INGRESS
e687ad60 4593 if (nf_hook_ingress_active(skb)) {
2c1e2703
AC
4594 int ingress_retval;
4595
e687ad60
PN
4596 if (*pt_prev) {
4597 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4598 *pt_prev = NULL;
4599 }
4600
2c1e2703
AC
4601 rcu_read_lock();
4602 ingress_retval = nf_hook_ingress(skb);
4603 rcu_read_unlock();
4604 return ingress_retval;
e687ad60 4605 }
e7582bab 4606#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
4607 return 0;
4608}
e687ad60 4609
88eb1944
EC
4610static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4611 struct packet_type **ppt_prev)
1da177e4
LT
4612{
4613 struct packet_type *ptype, *pt_prev;
ab95bfe0 4614 rx_handler_func_t *rx_handler;
f2ccd8fa 4615 struct net_device *orig_dev;
8a4eb573 4616 bool deliver_exact = false;
1da177e4 4617 int ret = NET_RX_DROP;
252e3346 4618 __be16 type;
1da177e4 4619
588f0330 4620 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 4621
cf66ba58 4622 trace_netif_receive_skb(skb);
9b22ea56 4623
cc9bd5ce 4624 orig_dev = skb->dev;
8f903c70 4625
c1d2bbe1 4626 skb_reset_network_header(skb);
fda55eca
ED
4627 if (!skb_transport_header_was_set(skb))
4628 skb_reset_transport_header(skb);
0b5c9db1 4629 skb_reset_mac_len(skb);
1da177e4
LT
4630
4631 pt_prev = NULL;
4632
63d8ea7f 4633another_round:
b6858177 4634 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
4635
4636 __this_cpu_inc(softnet_data.processed);
4637
8ad227ff
PM
4638 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4639 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 4640 skb = skb_vlan_untag(skb);
bcc6d479 4641 if (unlikely(!skb))
2c17d27c 4642 goto out;
bcc6d479
JP
4643 }
4644
e7246e12
WB
4645 if (skb_skip_tc_classify(skb))
4646 goto skip_classify;
1da177e4 4647
9754e293 4648 if (pfmemalloc)
b4b9e355
MG
4649 goto skip_taps;
4650
1da177e4 4651 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
4652 if (pt_prev)
4653 ret = deliver_skb(skb, pt_prev, orig_dev);
4654 pt_prev = ptype;
4655 }
4656
4657 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4658 if (pt_prev)
4659 ret = deliver_skb(skb, pt_prev, orig_dev);
4660 pt_prev = ptype;
1da177e4
LT
4661 }
4662
b4b9e355 4663skip_taps:
1cf51900 4664#ifdef CONFIG_NET_INGRESS
aabf6772 4665 if (static_branch_unlikely(&ingress_needed_key)) {
1f211a1b 4666 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4577139b 4667 if (!skb)
2c17d27c 4668 goto out;
e687ad60
PN
4669
4670 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 4671 goto out;
4577139b 4672 }
1cf51900 4673#endif
a5135bcf 4674 skb_reset_tc(skb);
e7246e12 4675skip_classify:
9754e293 4676 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
4677 goto drop;
4678
df8a39de 4679 if (skb_vlan_tag_present(skb)) {
2425717b
JF
4680 if (pt_prev) {
4681 ret = deliver_skb(skb, pt_prev, orig_dev);
4682 pt_prev = NULL;
4683 }
48cc32d3 4684 if (vlan_do_receive(&skb))
2425717b
JF
4685 goto another_round;
4686 else if (unlikely(!skb))
2c17d27c 4687 goto out;
2425717b
JF
4688 }
4689
48cc32d3 4690 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
4691 if (rx_handler) {
4692 if (pt_prev) {
4693 ret = deliver_skb(skb, pt_prev, orig_dev);
4694 pt_prev = NULL;
4695 }
8a4eb573
JP
4696 switch (rx_handler(&skb)) {
4697 case RX_HANDLER_CONSUMED:
3bc1b1ad 4698 ret = NET_RX_SUCCESS;
2c17d27c 4699 goto out;
8a4eb573 4700 case RX_HANDLER_ANOTHER:
63d8ea7f 4701 goto another_round;
8a4eb573
JP
4702 case RX_HANDLER_EXACT:
4703 deliver_exact = true;
4704 case RX_HANDLER_PASS:
4705 break;
4706 default:
4707 BUG();
4708 }
ab95bfe0 4709 }
1da177e4 4710
df8a39de
JP
4711 if (unlikely(skb_vlan_tag_present(skb))) {
4712 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
4713 skb->pkt_type = PACKET_OTHERHOST;
4714 /* Note: we might in the future use prio bits
4715 * and set skb->priority like in vlan_do_receive()
4716 * For the time being, just ignore Priority Code Point
4717 */
4718 skb->vlan_tci = 0;
4719 }
48cc32d3 4720
7866a621
SN
4721 type = skb->protocol;
4722
63d8ea7f 4723 /* deliver only exact match when indicated */
7866a621
SN
4724 if (likely(!deliver_exact)) {
4725 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4726 &ptype_base[ntohs(type) &
4727 PTYPE_HASH_MASK]);
4728 }
1f3c8804 4729
7866a621
SN
4730 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4731 &orig_dev->ptype_specific);
4732
4733 if (unlikely(skb->dev != orig_dev)) {
4734 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4735 &skb->dev->ptype_specific);
1da177e4
LT
4736 }
4737
4738 if (pt_prev) {
1f8b977a 4739 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
0e698bf6 4740 goto drop;
88eb1944 4741 *ppt_prev = pt_prev;
1da177e4 4742 } else {
b4b9e355 4743drop:
6e7333d3
JW
4744 if (!deliver_exact)
4745 atomic_long_inc(&skb->dev->rx_dropped);
4746 else
4747 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
4748 kfree_skb(skb);
4749 /* Jamal, now you will not be able to escape explaining
4750 * to me how you were going to use this. :-)
4751 */
4752 ret = NET_RX_DROP;
4753 }
4754
2c17d27c 4755out:
9754e293
DM
4756 return ret;
4757}
4758
88eb1944
EC
4759static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
4760{
4761 struct net_device *orig_dev = skb->dev;
4762 struct packet_type *pt_prev = NULL;
4763 int ret;
4764
4765 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4766 if (pt_prev)
4767 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4768 return ret;
4769}
4770
1c601d82
JDB
4771/**
4772 * netif_receive_skb_core - special purpose version of netif_receive_skb
4773 * @skb: buffer to process
4774 *
4775 * More direct receive version of netif_receive_skb(). It should
4776 * only be used by callers that have a need to skip RPS and Generic XDP.
4777 * Caller must also take care of handling if (page_is_)pfmemalloc.
4778 *
4779 * This function may only be called from softirq context and interrupts
4780 * should be enabled.
4781 *
4782 * Return values (usually ignored):
4783 * NET_RX_SUCCESS: no congestion
4784 * NET_RX_DROP: packet was dropped
4785 */
4786int netif_receive_skb_core(struct sk_buff *skb)
4787{
4788 int ret;
4789
4790 rcu_read_lock();
88eb1944 4791 ret = __netif_receive_skb_one_core(skb, false);
1c601d82
JDB
4792 rcu_read_unlock();
4793
4794 return ret;
4795}
4796EXPORT_SYMBOL(netif_receive_skb_core);
4797
88eb1944
EC
4798static inline void __netif_receive_skb_list_ptype(struct list_head *head,
4799 struct packet_type *pt_prev,
4800 struct net_device *orig_dev)
4ce0017a
EC
4801{
4802 struct sk_buff *skb, *next;
4803
88eb1944
EC
4804 if (!pt_prev)
4805 return;
4806 if (list_empty(head))
4807 return;
17266ee9
EC
4808 if (pt_prev->list_func != NULL)
4809 pt_prev->list_func(head, pt_prev, orig_dev);
4810 else
4811 list_for_each_entry_safe(skb, next, head, list)
4812 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
88eb1944
EC
4813}
4814
4815static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
4816{
4817 /* Fast-path assumptions:
4818 * - There is no RX handler.
4819 * - Only one packet_type matches.
4820 * If either of these fails, we will end up doing some per-packet
4821 * processing in-line, then handling the 'last ptype' for the whole
4822 * sublist. This can't cause out-of-order delivery to any single ptype,
4823 * because the 'last ptype' must be constant across the sublist, and all
4824 * other ptypes are handled per-packet.
4825 */
4826 /* Current (common) ptype of sublist */
4827 struct packet_type *pt_curr = NULL;
4828 /* Current (common) orig_dev of sublist */
4829 struct net_device *od_curr = NULL;
4830 struct list_head sublist;
4831 struct sk_buff *skb, *next;
4832
4833 list_for_each_entry_safe(skb, next, head, list) {
4834 struct net_device *orig_dev = skb->dev;
4835 struct packet_type *pt_prev = NULL;
4836
4837 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4838 if (pt_curr != pt_prev || od_curr != orig_dev) {
4839 /* dispatch old sublist */
4840 list_cut_before(&sublist, head, &skb->list);
4841 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4842 /* start new sublist */
4843 pt_curr = pt_prev;
4844 od_curr = orig_dev;
4845 }
4846 }
4847
4848 /* dispatch final sublist */
4849 __netif_receive_skb_list_ptype(head, pt_curr, od_curr);
4ce0017a
EC
4850}
4851
9754e293
DM
4852static int __netif_receive_skb(struct sk_buff *skb)
4853{
4854 int ret;
4855
4856 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
f1083048 4857 unsigned int noreclaim_flag;
9754e293
DM
4858
4859 /*
4860 * PFMEMALLOC skbs are special, they should
4861 * - be delivered to SOCK_MEMALLOC sockets only
4862 * - stay away from userspace
4863 * - have bounded memory usage
4864 *
4865 * Use PF_MEMALLOC as this saves us from propagating the allocation
4866 * context down to all allocation sites.
4867 */
f1083048 4868 noreclaim_flag = memalloc_noreclaim_save();
88eb1944 4869 ret = __netif_receive_skb_one_core(skb, true);
f1083048 4870 memalloc_noreclaim_restore(noreclaim_flag);
9754e293 4871 } else
88eb1944 4872 ret = __netif_receive_skb_one_core(skb, false);
9754e293 4873
1da177e4
LT
4874 return ret;
4875}
0a9627f2 4876
4ce0017a
EC
4877static void __netif_receive_skb_list(struct list_head *head)
4878{
4879 unsigned long noreclaim_flag = 0;
4880 struct sk_buff *skb, *next;
4881 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
4882
4883 list_for_each_entry_safe(skb, next, head, list) {
4884 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
4885 struct list_head sublist;
4886
4887 /* Handle the previous sublist */
4888 list_cut_before(&sublist, head, &skb->list);
b9f463d6
EC
4889 if (!list_empty(&sublist))
4890 __netif_receive_skb_list_core(&sublist, pfmemalloc);
4ce0017a
EC
4891 pfmemalloc = !pfmemalloc;
4892 /* See comments in __netif_receive_skb */
4893 if (pfmemalloc)
4894 noreclaim_flag = memalloc_noreclaim_save();
4895 else
4896 memalloc_noreclaim_restore(noreclaim_flag);
4897 }
4898 }
4899 /* Handle the remaining sublist */
b9f463d6
EC
4900 if (!list_empty(head))
4901 __netif_receive_skb_list_core(head, pfmemalloc);
4ce0017a
EC
4902 /* Restore pflags */
4903 if (pfmemalloc)
4904 memalloc_noreclaim_restore(noreclaim_flag);
4905}
4906
f4e63525 4907static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
b5cdae32 4908{
58038695 4909 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
b5cdae32
DM
4910 struct bpf_prog *new = xdp->prog;
4911 int ret = 0;
4912
4913 switch (xdp->command) {
58038695 4914 case XDP_SETUP_PROG:
b5cdae32
DM
4915 rcu_assign_pointer(dev->xdp_prog, new);
4916 if (old)
4917 bpf_prog_put(old);
4918
4919 if (old && !new) {
02786475 4920 static_branch_dec(&generic_xdp_needed_key);
b5cdae32 4921 } else if (new && !old) {
02786475 4922 static_branch_inc(&generic_xdp_needed_key);
b5cdae32 4923 dev_disable_lro(dev);
56f5aa77 4924 dev_disable_gro_hw(dev);
b5cdae32
DM
4925 }
4926 break;
b5cdae32
DM
4927
4928 case XDP_QUERY_PROG:
58038695
MKL
4929 xdp->prog_attached = !!old;
4930 xdp->prog_id = old ? old->aux->id : 0;
b5cdae32
DM
4931 break;
4932
4933 default:
4934 ret = -EINVAL;
4935 break;
4936 }
4937
4938 return ret;
4939}
4940
ae78dbfa 4941static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 4942{
2c17d27c
JA
4943 int ret;
4944
588f0330 4945 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 4946
c1f19b51
RC
4947 if (skb_defer_rx_timestamp(skb))
4948 return NET_RX_SUCCESS;
4949
02786475 4950 if (static_branch_unlikely(&generic_xdp_needed_key)) {
bbbe211c 4951 int ret;
b5cdae32 4952
bbbe211c
JF
4953 preempt_disable();
4954 rcu_read_lock();
4955 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4956 rcu_read_unlock();
4957 preempt_enable();
4958
4959 if (ret != XDP_PASS)
d4455169 4960 return NET_RX_DROP;
b5cdae32
DM
4961 }
4962
bbbe211c 4963 rcu_read_lock();
df334545 4964#ifdef CONFIG_RPS
c5905afb 4965 if (static_key_false(&rps_needed)) {
3b098e2d 4966 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 4967 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 4968
3b098e2d
ED
4969 if (cpu >= 0) {
4970 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4971 rcu_read_unlock();
adc9300e 4972 return ret;
3b098e2d 4973 }
fec5e652 4974 }
1e94d72f 4975#endif
2c17d27c
JA
4976 ret = __netif_receive_skb(skb);
4977 rcu_read_unlock();
4978 return ret;
0a9627f2 4979}
ae78dbfa 4980
7da517a3
EC
4981static void netif_receive_skb_list_internal(struct list_head *head)
4982{
4983 struct bpf_prog *xdp_prog = NULL;
4984 struct sk_buff *skb, *next;
4985
4986 list_for_each_entry_safe(skb, next, head, list) {
4987 net_timestamp_check(netdev_tstamp_prequeue, skb);
4988 if (skb_defer_rx_timestamp(skb))
4989 /* Handled, remove from list */
4990 list_del(&skb->list);
4991 }
4992
4993 if (static_branch_unlikely(&generic_xdp_needed_key)) {
4994 preempt_disable();
4995 rcu_read_lock();
4996 list_for_each_entry_safe(skb, next, head, list) {
4997 xdp_prog = rcu_dereference(skb->dev->xdp_prog);
4998 if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
4999 /* Dropped, remove from list */
5000 list_del(&skb->list);
5001 }
5002 rcu_read_unlock();
5003 preempt_enable();
5004 }
5005
5006 rcu_read_lock();
5007#ifdef CONFIG_RPS
5008 if (static_key_false(&rps_needed)) {
5009 list_for_each_entry_safe(skb, next, head, list) {
5010 struct rps_dev_flow voidflow, *rflow = &voidflow;
5011 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5012
5013 if (cpu >= 0) {
5014 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5015 /* Handled, remove from list */
5016 list_del(&skb->list);
5017 }
5018 }
5019 }
5020#endif
5021 __netif_receive_skb_list(head);
5022 rcu_read_unlock();
5023}
5024
ae78dbfa
BH
5025/**
5026 * netif_receive_skb - process receive buffer from network
5027 * @skb: buffer to process
5028 *
5029 * netif_receive_skb() is the main receive data processing function.
5030 * It always succeeds. The buffer may be dropped during processing
5031 * for congestion control or by the protocol layers.
5032 *
5033 * This function may only be called from softirq context and interrupts
5034 * should be enabled.
5035 *
5036 * Return values (usually ignored):
5037 * NET_RX_SUCCESS: no congestion
5038 * NET_RX_DROP: packet was dropped
5039 */
04eb4489 5040int netif_receive_skb(struct sk_buff *skb)
ae78dbfa
BH
5041{
5042 trace_netif_receive_skb_entry(skb);
5043
5044 return netif_receive_skb_internal(skb);
5045}
04eb4489 5046EXPORT_SYMBOL(netif_receive_skb);
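/*
 * Illustrative sketch (not part of this file): calling netif_receive_skb()
 * from softirq context as the kernel-doc above requires, here from a NAPI
 * poll callback. struct example_adapter and example_hw_fetch_skb() are
 * hypothetical driver pieces.
 */
struct example_adapter {
	struct napi_struct napi;
	struct net_device *netdev;
};

static struct sk_buff *example_hw_fetch_skb(struct example_adapter *ap); /* hypothetical */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *ap = container_of(napi, struct example_adapter, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = example_hw_fetch_skb(ap);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ap->netdev);
		netif_receive_skb(skb);	/* or napi_gro_receive(napi, skb) */
		done++;
	}

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}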
1da177e4 5047
5048 /**
5049  * netif_receive_skb_list - process many receive buffers from network
5050  * @head: list of skbs to process.
5051  *
5052  * Since the return value of netif_receive_skb() is normally ignored, and
5053  * would not be meaningful for a list, this function returns void.
5054  *
5055  * This function may only be called from softirq context and interrupts
5056  * should be enabled.
5057  */
5058void netif_receive_skb_list(struct list_head *head)
5059{
7da517a3 5060 struct sk_buff *skb;
f6ad8c1b 5061
b9f463d6
EC
5062 if (list_empty(head))
5063 return;
920572b7
EC
5064 list_for_each_entry(skb, head, list)
5065 trace_netif_receive_skb_list_entry(skb);
7da517a3 5066 netif_receive_skb_list_internal(head);
f6ad8c1b
EC
5067}
5068EXPORT_SYMBOL(netif_receive_skb_list);
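/*
 * Illustrative sketch (not part of this file): batching skbs for
 * netif_receive_skb_list(). The skbs are linked through skb->list, which is
 * exactly what the list walk above iterates; the array-of-skbs source is
 * hypothetical.
 */
static void example_deliver_batch(struct sk_buff **skbs, unsigned int n)
{
	LIST_HEAD(rx_list);
	unsigned int i;

	for (i = 0; i < n; i++)
		list_add_tail(&skbs[i]->list, &rx_list);

	/* Consumes every skb on the list; no return value to check. */
	netif_receive_skb_list(&rx_list);
}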
5069
41852497 5070DEFINE_PER_CPU(struct work_struct, flush_works);
145dd5f9
PA
5071
5072/* Network device is going away, flush any packets still pending */
5073static void flush_backlog(struct work_struct *work)
6e583ce5 5074{
6e583ce5 5075 struct sk_buff *skb, *tmp;
145dd5f9
PA
5076 struct softnet_data *sd;
5077
5078 local_bh_disable();
5079 sd = this_cpu_ptr(&softnet_data);
6e583ce5 5080
145dd5f9 5081 local_irq_disable();
e36fa2f7 5082 rps_lock(sd);
6e7676c1 5083 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
41852497 5084 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
e36fa2f7 5085 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 5086 kfree_skb(skb);
76cc8b13 5087 input_queue_head_incr(sd);
6e583ce5 5088 }
6e7676c1 5089 }
e36fa2f7 5090 rps_unlock(sd);
145dd5f9 5091 local_irq_enable();
6e7676c1
CG
5092
5093 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
41852497 5094 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
6e7676c1
CG
5095 __skb_unlink(skb, &sd->process_queue);
5096 kfree_skb(skb);
76cc8b13 5097 input_queue_head_incr(sd);
6e7676c1
CG
5098 }
5099 }
145dd5f9
PA
5100 local_bh_enable();
5101}
5102
41852497 5103static void flush_all_backlogs(void)
145dd5f9
PA
5104{
5105 unsigned int cpu;
5106
5107 get_online_cpus();
5108
41852497
ED
5109 for_each_online_cpu(cpu)
5110 queue_work_on(cpu, system_highpri_wq,
5111 per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
5112
5113 for_each_online_cpu(cpu)
41852497 5114 flush_work(per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
5115
5116 put_online_cpus();
6e583ce5
SH
5117}
5118
d565b0a1
HX
5119static int napi_gro_complete(struct sk_buff *skb)
5120{
22061d80 5121 struct packet_offload *ptype;
d565b0a1 5122 __be16 type = skb->protocol;
22061d80 5123 struct list_head *head = &offload_base;
d565b0a1
HX
5124 int err = -ENOENT;
5125
c3c7c254
ED
5126 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5127
fc59f9a3
HX
5128 if (NAPI_GRO_CB(skb)->count == 1) {
5129 skb_shinfo(skb)->gso_size = 0;
d565b0a1 5130 goto out;
fc59f9a3 5131 }
d565b0a1
HX
5132
5133 rcu_read_lock();
5134 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 5135 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
5136 continue;
5137
299603e8 5138 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
5139 break;
5140 }
5141 rcu_read_unlock();
5142
5143 if (err) {
5144 WARN_ON(&ptype->list == head);
5145 kfree_skb(skb);
5146 return NET_RX_SUCCESS;
5147 }
5148
5149out:
ae78dbfa 5150 return netif_receive_skb_internal(skb);
d565b0a1
HX
5151}
5152
6312fe77 5153static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
07d78363 5154 bool flush_old)
d565b0a1 5155{
6312fe77 5156 struct list_head *head = &napi->gro_hash[index].list;
d4546c25 5157 struct sk_buff *skb, *p;
2e71a6f8 5158
07d78363 5159 list_for_each_entry_safe_reverse(skb, p, head, list) {
2e71a6f8
ED
5160 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5161 return;
d4546c25 5162 list_del_init(&skb->list);
d565b0a1 5163 napi_gro_complete(skb);
2e71a6f8 5164 napi->gro_count--;
6312fe77 5165 napi->gro_hash[index].count--;
d565b0a1 5166 }
d565b0a1 5167}
07d78363 5168
5169 /* napi->gro_hash[].list contains packets ordered by age,
5170 * youngest packets at the head of it.
5171 * Complete skbs in reverse order to reduce latencies.
5172 */
5173void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5174{
6312fe77 5175 u32 i;
07d78363 5176
6312fe77
LR
5177 for (i = 0; i < GRO_HASH_BUCKETS; i++)
5178 __napi_gro_flush_chain(napi, i, flush_old);
07d78363 5179}
86cac58b 5180EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 5181
07d78363
DM
5182static struct list_head *gro_list_prepare(struct napi_struct *napi,
5183 struct sk_buff *skb)
89c5fa33 5184{
89c5fa33 5185 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 5186 u32 hash = skb_get_hash_raw(skb);
07d78363 5187 struct list_head *head;
d4546c25 5188 struct sk_buff *p;
89c5fa33 5189
6312fe77 5190 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
07d78363 5191 list_for_each_entry(p, head, list) {
89c5fa33
ED
5192 unsigned long diffs;
5193
0b4cec8c
TH
5194 NAPI_GRO_CB(p)->flush = 0;
5195
5196 if (hash != skb_get_hash_raw(p)) {
5197 NAPI_GRO_CB(p)->same_flow = 0;
5198 continue;
5199 }
5200
89c5fa33
ED
5201 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5202 diffs |= p->vlan_tci ^ skb->vlan_tci;
ce87fc6c 5203 diffs |= skb_metadata_dst_cmp(p, skb);
de8f3a83 5204 diffs |= skb_metadata_differs(p, skb);
89c5fa33
ED
5205 if (maclen == ETH_HLEN)
5206 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 5207 skb_mac_header(skb));
89c5fa33
ED
5208 else if (!diffs)
5209 diffs = memcmp(skb_mac_header(p),
a50e233c 5210 skb_mac_header(skb),
89c5fa33
ED
5211 maclen);
5212 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33 5213 }
07d78363
DM
5214
5215 return head;
89c5fa33
ED
5216}
5217
299603e8
JC
5218static void skb_gro_reset_offset(struct sk_buff *skb)
5219{
5220 const struct skb_shared_info *pinfo = skb_shinfo(skb);
5221 const skb_frag_t *frag0 = &pinfo->frags[0];
5222
5223 NAPI_GRO_CB(skb)->data_offset = 0;
5224 NAPI_GRO_CB(skb)->frag0 = NULL;
5225 NAPI_GRO_CB(skb)->frag0_len = 0;
5226
5227 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
5228 pinfo->nr_frags &&
5229 !PageHighMem(skb_frag_page(frag0))) {
5230 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
7cfd5fd5
ED
5231 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5232 skb_frag_size(frag0),
5233 skb->end - skb->tail);
89c5fa33
ED
5234 }
5235}
5236
a50e233c
ED
5237static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5238{
5239 struct skb_shared_info *pinfo = skb_shinfo(skb);
5240
5241 BUG_ON(skb->end - skb->tail < grow);
5242
5243 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5244
5245 skb->data_len -= grow;
5246 skb->tail += grow;
5247
5248 pinfo->frags[0].page_offset += grow;
5249 skb_frag_size_sub(&pinfo->frags[0], grow);
5250
5251 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5252 skb_frag_unref(skb, 0);
5253 memmove(pinfo->frags, pinfo->frags + 1,
5254 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5255 }
5256}
5257
6312fe77 5258static void gro_flush_oldest(struct list_head *head)
07d78363 5259{
6312fe77 5260 struct sk_buff *oldest;
07d78363 5261
6312fe77 5262 oldest = list_last_entry(head, struct sk_buff, list);
07d78363 5263
6312fe77 5264 /* We are called with head length >= MAX_GRO_SKBS, so an empty
07d78363
DM
5265 * list (and hence a NULL oldest) is impossible.
5266 */
5267 if (WARN_ON_ONCE(!oldest))
5268 return;
5269
5270 /* Do not adjust napi->gro_count, caller is adding a new SKB to
5271 * the chain.
5272 */
5273 list_del(&oldest->list);
5274 napi_gro_complete(oldest);
5275}
5276
bb728820 5277static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1 5278{
6312fe77 5279 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
d4546c25 5280 struct list_head *head = &offload_base;
22061d80 5281 struct packet_offload *ptype;
d565b0a1 5282 __be16 type = skb->protocol;
07d78363 5283 struct list_head *gro_head;
d4546c25 5284 struct sk_buff *pp = NULL;
5b252f0c 5285 enum gro_result ret;
d4546c25 5286 int same_flow;
a50e233c 5287 int grow;
d565b0a1 5288
b5cdae32 5289 if (netif_elide_gro(skb->dev))
d565b0a1
HX
5290 goto normal;
5291
07d78363 5292 gro_head = gro_list_prepare(napi, skb);
89c5fa33 5293
d565b0a1
HX
5294 rcu_read_lock();
5295 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 5296 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
5297 continue;
5298
86911732 5299 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 5300 skb_reset_mac_len(skb);
d565b0a1 5301 NAPI_GRO_CB(skb)->same_flow = 0;
d61d072e 5302 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5d38a079 5303 NAPI_GRO_CB(skb)->free = 0;
fac8e0f5 5304 NAPI_GRO_CB(skb)->encap_mark = 0;
fcd91dd4 5305 NAPI_GRO_CB(skb)->recursion_counter = 0;
a0ca153f 5306 NAPI_GRO_CB(skb)->is_fou = 0;
1530545e 5307 NAPI_GRO_CB(skb)->is_atomic = 1;
15e2396d 5308 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 5309
662880f4
TH
5310 /* Setup for GRO checksum validation */
5311 switch (skb->ip_summed) {
5312 case CHECKSUM_COMPLETE:
5313 NAPI_GRO_CB(skb)->csum = skb->csum;
5314 NAPI_GRO_CB(skb)->csum_valid = 1;
5315 NAPI_GRO_CB(skb)->csum_cnt = 0;
5316 break;
5317 case CHECKSUM_UNNECESSARY:
5318 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5319 NAPI_GRO_CB(skb)->csum_valid = 0;
5320 break;
5321 default:
5322 NAPI_GRO_CB(skb)->csum_cnt = 0;
5323 NAPI_GRO_CB(skb)->csum_valid = 0;
5324 }
d565b0a1 5325
07d78363 5326 pp = ptype->callbacks.gro_receive(gro_head, skb);
d565b0a1
HX
5327 break;
5328 }
5329 rcu_read_unlock();
5330
5331 if (&ptype->list == head)
5332 goto normal;
5333
25393d3f
SK
5334 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
5335 ret = GRO_CONSUMED;
5336 goto ok;
5337 }
5338
0da2afd5 5339 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 5340 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 5341
d565b0a1 5342 if (pp) {
d4546c25
DM
5343 list_del_init(&pp->list);
5344 napi_gro_complete(pp);
4ae5544f 5345 napi->gro_count--;
6312fe77 5346 napi->gro_hash[hash].count--;
d565b0a1
HX
5347 }
5348
0da2afd5 5349 if (same_flow)
d565b0a1
HX
5350 goto ok;
5351
600adc18 5352 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 5353 goto normal;
d565b0a1 5354
6312fe77
LR
5355 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5356 gro_flush_oldest(gro_head);
600adc18
ED
5357 } else {
5358 napi->gro_count++;
6312fe77 5359 napi->gro_hash[hash].count++;
600adc18 5360 }
d565b0a1 5361 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 5362 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 5363 NAPI_GRO_CB(skb)->last = skb;
86911732 5364 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
07d78363 5365 list_add(&skb->list, gro_head);
5d0d9be8 5366 ret = GRO_HELD;
d565b0a1 5367
ad0f9904 5368pull:
a50e233c
ED
5369 grow = skb_gro_offset(skb) - skb_headlen(skb);
5370 if (grow > 0)
5371 gro_pull_from_frag0(skb, grow);
d565b0a1 5372ok:
5d0d9be8 5373 return ret;
d565b0a1
HX
5374
5375normal:
ad0f9904
HX
5376 ret = GRO_NORMAL;
5377 goto pull;
5d38a079 5378}
96e93eab 5379
bf5a755f
JC
5380struct packet_offload *gro_find_receive_by_type(__be16 type)
5381{
5382 struct list_head *offload_head = &offload_base;
5383 struct packet_offload *ptype;
5384
5385 list_for_each_entry_rcu(ptype, offload_head, list) {
5386 if (ptype->type != type || !ptype->callbacks.gro_receive)
5387 continue;
5388 return ptype;
5389 }
5390 return NULL;
5391}
e27a2f83 5392EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
5393
5394struct packet_offload *gro_find_complete_by_type(__be16 type)
5395{
5396 struct list_head *offload_head = &offload_base;
5397 struct packet_offload *ptype;
5398
5399 list_for_each_entry_rcu(ptype, offload_head, list) {
5400 if (ptype->type != type || !ptype->callbacks.gro_complete)
5401 continue;
5402 return ptype;
5403 }
5404 return NULL;
5405}
e27a2f83 5406EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 5407
e44699d2
MK
5408static void napi_skb_free_stolen_head(struct sk_buff *skb)
5409{
5410 skb_dst_drop(skb);
5411 secpath_reset(skb);
5412 kmem_cache_free(skbuff_head_cache, skb);
5413}
5414
bb728820 5415static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 5416{
5d0d9be8
HX
5417 switch (ret) {
5418 case GRO_NORMAL:
ae78dbfa 5419 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
5420 ret = GRO_DROP;
5421 break;
5d38a079 5422
5d0d9be8 5423 case GRO_DROP:
5d38a079
HX
5424 kfree_skb(skb);
5425 break;
5b252f0c 5426
daa86548 5427 case GRO_MERGED_FREE:
e44699d2
MK
5428 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5429 napi_skb_free_stolen_head(skb);
5430 else
d7e8883c 5431 __kfree_skb(skb);
daa86548
ED
5432 break;
5433
5b252f0c
BH
5434 case GRO_HELD:
5435 case GRO_MERGED:
25393d3f 5436 case GRO_CONSUMED:
5b252f0c 5437 break;
5d38a079
HX
5438 }
5439
c7c4b3b6 5440 return ret;
5d0d9be8 5441}
5d0d9be8 5442
c7c4b3b6 5443gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 5444{
93f93a44 5445 skb_mark_napi_id(skb, napi);
ae78dbfa 5446 trace_napi_gro_receive_entry(skb);
86911732 5447
a50e233c
ED
5448 skb_gro_reset_offset(skb);
5449
89c5fa33 5450 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
5451}
5452EXPORT_SYMBOL(napi_gro_receive);
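/* Illustrative sketch, not part of net/core/dev.c: a minimal NAPI poll
 * routine handing completed receive buffers to GRO via napi_gro_receive().
 * struct foo_ring and foo_build_rx_skb() are hypothetical driver pieces
 * invented for the example; only the eth_type_trans() + napi_gro_receive()
 * steps reflect the API implemented above.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct foo_ring {
	struct napi_struct napi;
	struct net_device *netdev;
};

static struct sk_buff *foo_build_rx_skb(struct foo_ring *ring);	/* hypothetical */

static int foo_poll_rx_sketch(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = foo_build_rx_skb(ring);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->netdev);
		/* GRO may merge, hold, or pass the skb up the stack */
		napi_gro_receive(napi, skb);
		work_done++;
	}
	/* completion via napi_complete_done() omitted; see the later sketch */
	return work_done;
}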
5453
d0c2b0d2 5454static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 5455{
93a35f59
ED
5456 if (unlikely(skb->pfmemalloc)) {
5457 consume_skb(skb);
5458 return;
5459 }
96e93eab 5460 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
5461 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5462 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 5463 skb->vlan_tci = 0;
66c46d74 5464 skb->dev = napi->dev;
6d152e23 5465 skb->skb_iif = 0;
c3caf119
JC
5466 skb->encapsulation = 0;
5467 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 5468 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
f991bb9d 5469 secpath_reset(skb);
96e93eab
HX
5470
5471 napi->skb = skb;
5472}
96e93eab 5473
76620aaf 5474struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 5475{
5d38a079 5476 struct sk_buff *skb = napi->skb;
5d38a079
HX
5477
5478 if (!skb) {
fd11a83d 5479 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
e2f9dc3b
ED
5480 if (skb) {
5481 napi->skb = skb;
5482 skb_mark_napi_id(skb, napi);
5483 }
80595d59 5484 }
96e93eab
HX
5485 return skb;
5486}
76620aaf 5487EXPORT_SYMBOL(napi_get_frags);
96e93eab 5488
a50e233c
ED
5489static gro_result_t napi_frags_finish(struct napi_struct *napi,
5490 struct sk_buff *skb,
5491 gro_result_t ret)
96e93eab 5492{
5d0d9be8
HX
5493 switch (ret) {
5494 case GRO_NORMAL:
a50e233c
ED
5495 case GRO_HELD:
5496 __skb_push(skb, ETH_HLEN);
5497 skb->protocol = eth_type_trans(skb, skb->dev);
5498 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 5499 ret = GRO_DROP;
86911732 5500 break;
5d38a079 5501
5d0d9be8 5502 case GRO_DROP:
5d0d9be8
HX
5503 napi_reuse_skb(napi, skb);
5504 break;
5b252f0c 5505
e44699d2
MK
5506 case GRO_MERGED_FREE:
5507 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5508 napi_skb_free_stolen_head(skb);
5509 else
5510 napi_reuse_skb(napi, skb);
5511 break;
5512
5b252f0c 5513 case GRO_MERGED:
25393d3f 5514 case GRO_CONSUMED:
5b252f0c 5515 break;
5d0d9be8 5516 }
5d38a079 5517
c7c4b3b6 5518 return ret;
5d38a079 5519}
5d0d9be8 5520
a50e233c
ED
5521/* Upper GRO stack assumes network header starts at gro_offset=0
5522 * Drivers could call both napi_gro_frags() and napi_gro_receive()
5523 * We copy ethernet header into skb->data to have a common layout.
5524 */
4adb9c4a 5525static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
5526{
5527 struct sk_buff *skb = napi->skb;
a50e233c
ED
5528 const struct ethhdr *eth;
5529 unsigned int hlen = sizeof(*eth);
76620aaf
HX
5530
5531 napi->skb = NULL;
5532
a50e233c
ED
5533 skb_reset_mac_header(skb);
5534 skb_gro_reset_offset(skb);
5535
5536 eth = skb_gro_header_fast(skb, 0);
5537 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5538 eth = skb_gro_header_slow(skb, hlen, 0);
5539 if (unlikely(!eth)) {
4da46ceb
AC
5540 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5541 __func__, napi->dev->name);
a50e233c
ED
5542 napi_reuse_skb(napi, skb);
5543 return NULL;
5544 }
5545 } else {
5546 gro_pull_from_frag0(skb, hlen);
5547 NAPI_GRO_CB(skb)->frag0 += hlen;
5548 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 5549 }
a50e233c
ED
5550 __skb_pull(skb, hlen);
5551
5552 /*
5553 * This works because the only protocols we care about don't require
5554 * special handling.
5555 * We'll fix it up properly in napi_frags_finish()
5556 */
5557 skb->protocol = eth->h_proto;
76620aaf 5558
76620aaf
HX
5559 return skb;
5560}
76620aaf 5561
c7c4b3b6 5562gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 5563{
76620aaf 5564 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
5565
5566 if (!skb)
c7c4b3b6 5567 return GRO_DROP;
5d0d9be8 5568
ae78dbfa
BH
5569 trace_napi_gro_frags_entry(skb);
5570
89c5fa33 5571 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 5572}
5d38a079
HX
5573EXPORT_SYMBOL(napi_gro_frags);
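/* Illustrative sketch, not part of net/core/dev.c: the napi_get_frags() /
 * napi_gro_frags() pairing used by drivers that receive directly into
 * pages. The page, offset and length are assumed to come from a completed
 * RX descriptor, and the page is assumed to start with the Ethernet
 * header; the foo_* naming is an assumption for the example.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static gro_result_t foo_rx_page_sketch(struct napi_struct *napi,
					struct page *page,
					unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return GRO_DROP;

	/* attach the page as frag 0; napi_frags_skb() will pull the header */
	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);

	/* hands napi->skb to GRO and clears it for the next napi_get_frags() */
	return napi_gro_frags(napi);
}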
5574
573e8fca
TH
5575/* Compute the checksum from gro_offset and return the folded value
5576 * after adding in any pseudo checksum.
5577 */
5578__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5579{
5580 __wsum wsum;
5581 __sum16 sum;
5582
5583 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5584
5585 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5586 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5587 if (likely(!sum)) {
5588 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5589 !skb->csum_complete_sw)
5590 netdev_rx_csum_fault(skb->dev);
5591 }
5592
5593 NAPI_GRO_CB(skb)->csum = wsum;
5594 NAPI_GRO_CB(skb)->csum_valid = 1;
5595
5596 return sum;
5597}
5598EXPORT_SYMBOL(__skb_gro_checksum_complete);
5599
773fc8f6 5600static void net_rps_send_ipi(struct softnet_data *remsd)
5601{
5602#ifdef CONFIG_RPS
5603 while (remsd) {
5604 struct softnet_data *next = remsd->rps_ipi_next;
5605
5606 if (cpu_online(remsd->cpu))
5607 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5608 remsd = next;
5609 }
5610#endif
5611}
5612
e326bed2 5613/*
855abcf0 5614 * net_rps_action_and_irq_enable sends any pending IPIs for rps.
e326bed2
ED
5615 * Note: called with local irq disabled, but exits with local irq enabled.
5616 */
5617static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5618{
5619#ifdef CONFIG_RPS
5620 struct softnet_data *remsd = sd->rps_ipi_list;
5621
5622 if (remsd) {
5623 sd->rps_ipi_list = NULL;
5624
5625 local_irq_enable();
5626
5627 /* Send pending IPIs to kick RPS processing on remote cpus. */
773fc8f6 5628 net_rps_send_ipi(remsd);
e326bed2
ED
5629 } else
5630#endif
5631 local_irq_enable();
5632}
5633
d75b1ade
ED
5634static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5635{
5636#ifdef CONFIG_RPS
5637 return sd->rps_ipi_list != NULL;
5638#else
5639 return false;
5640#endif
5641}
5642
bea3348e 5643static int process_backlog(struct napi_struct *napi, int quota)
1da177e4 5644{
eecfd7c4 5645 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
145dd5f9
PA
5646 bool again = true;
5647 int work = 0;
1da177e4 5648
e326bed2
ED
5649 /* Check if we have pending IPIs, it's better to send them now,
5650 * rather than waiting for net_rx_action() to end.
5651 */
d75b1ade 5652 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
5653 local_irq_disable();
5654 net_rps_action_and_irq_enable(sd);
5655 }
d75b1ade 5656
3d48b53f 5657 napi->weight = dev_rx_weight;
145dd5f9 5658 while (again) {
1da177e4 5659 struct sk_buff *skb;
6e7676c1
CG
5660
5661 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 5662 rcu_read_lock();
6e7676c1 5663 __netif_receive_skb(skb);
2c17d27c 5664 rcu_read_unlock();
76cc8b13 5665 input_queue_head_incr(sd);
145dd5f9 5666 if (++work >= quota)
76cc8b13 5667 return work;
145dd5f9 5668
6e7676c1 5669 }
1da177e4 5670
145dd5f9 5671 local_irq_disable();
e36fa2f7 5672 rps_lock(sd);
11ef7a89 5673 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
5674 /*
5675 * Inline a custom version of __napi_complete().
5676 * Only the current cpu owns and manipulates this napi,
11ef7a89
TH
5677 * and NAPI_STATE_SCHED is the only possible flag set
5678 * on backlog.
5679 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
5680 * and we don't need an smp_mb() memory barrier.
5681 */
eecfd7c4 5682 napi->state = 0;
145dd5f9
PA
5683 again = false;
5684 } else {
5685 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5686 &sd->process_queue);
bea3348e 5687 }
e36fa2f7 5688 rps_unlock(sd);
145dd5f9 5689 local_irq_enable();
6e7676c1 5690 }
1da177e4 5691
bea3348e
SH
5692 return work;
5693}
1da177e4 5694
bea3348e
SH
5695/**
5696 * __napi_schedule - schedule for receive
c4ea43c5 5697 * @n: entry to schedule
bea3348e 5698 *
bc9ad166
ED
5699 * The entry's receive function will be scheduled to run.
5700 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 5701 */
b5606c2d 5702void __napi_schedule(struct napi_struct *n)
bea3348e
SH
5703{
5704 unsigned long flags;
1da177e4 5705
bea3348e 5706 local_irq_save(flags);
903ceff7 5707 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 5708 local_irq_restore(flags);
1da177e4 5709}
bea3348e
SH
5710EXPORT_SYMBOL(__napi_schedule);
5711
39e6c820
ED
5712/**
5713 * napi_schedule_prep - check if napi can be scheduled
5714 * @n: napi context
5715 *
5716 * Test if NAPI routine is already running, and if not mark
5717 * it as running. This is used as a condition variable
5718 * to ensure only one NAPI poll instance runs. We also make
5719 * sure there is no pending NAPI disable.
5720 */
5721bool napi_schedule_prep(struct napi_struct *n)
5722{
5723 unsigned long val, new;
5724
5725 do {
5726 val = READ_ONCE(n->state);
5727 if (unlikely(val & NAPIF_STATE_DISABLE))
5728 return false;
5729 new = val | NAPIF_STATE_SCHED;
5730
5731 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5732 * This was suggested by Alexander Duyck, as compiler
5733 * emits better code than :
5734 * if (val & NAPIF_STATE_SCHED)
5735 * new |= NAPIF_STATE_MISSED;
5736 */
5737 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5738 NAPIF_STATE_MISSED;
5739 } while (cmpxchg(&n->state, val, new) != val);
5740
5741 return !(val & NAPIF_STATE_SCHED);
5742}
5743EXPORT_SYMBOL(napi_schedule_prep);
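/* Illustrative sketch, not part of net/core/dev.c: napi_schedule_prep() is
 * the test-and-set half of the scheduling pattern; a device interrupt
 * handler pairs it with __napi_schedule(), which is what the napi_schedule()
 * helper in <linux/netdevice.h> boils down to. struct foo_priv and the irq
 * handler are assumptions for the example.
 */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct napi_struct napi;
};

static irqreturn_t foo_irq_sketch(int irq, void *data)
{
	struct foo_priv *priv = data;

	/* the driver would mask its RX interrupt here, then defer to NAPI */
	if (napi_schedule_prep(&priv->napi))
		__napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}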
5744
bc9ad166
ED
5745/**
5746 * __napi_schedule_irqoff - schedule for receive
5747 * @n: entry to schedule
5748 *
5749 * Variant of __napi_schedule() assuming hard irqs are masked
5750 */
5751void __napi_schedule_irqoff(struct napi_struct *n)
5752{
5753 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5754}
5755EXPORT_SYMBOL(__napi_schedule_irqoff);
5756
364b6055 5757bool napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1 5758{
39e6c820 5759 unsigned long flags, val, new;
d565b0a1
HX
5760
5761 /*
217f6974
ED
5762 * 1) Don't let napi dequeue from the cpu poll list
5763 * just in case its running on a different cpu.
5764 * 2) If we are busy polling, do nothing here, we have
5765 * the guarantee we will be called later.
d565b0a1 5766 */
217f6974
ED
5767 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5768 NAPIF_STATE_IN_BUSY_POLL)))
364b6055 5769 return false;
d565b0a1 5770
07d78363 5771 if (n->gro_count) {
3b47d303 5772 unsigned long timeout = 0;
d75b1ade 5773
3b47d303
ED
5774 if (work_done)
5775 timeout = n->dev->gro_flush_timeout;
5776
5777 if (timeout)
5778 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5779 HRTIMER_MODE_REL_PINNED);
5780 else
5781 napi_gro_flush(n, false);
5782 }
02c1602e 5783 if (unlikely(!list_empty(&n->poll_list))) {
d75b1ade
ED
5784 /* If n->poll_list is not empty, we need to mask irqs */
5785 local_irq_save(flags);
02c1602e 5786 list_del_init(&n->poll_list);
d75b1ade
ED
5787 local_irq_restore(flags);
5788 }
39e6c820
ED
5789
5790 do {
5791 val = READ_ONCE(n->state);
5792
5793 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5794
5795 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5796
5797 /* If STATE_MISSED was set, leave STATE_SCHED set,
5798 * because we will call napi->poll() one more time.
5799 * This C code was suggested by Alexander Duyck to help gcc.
5800 */
5801 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5802 NAPIF_STATE_SCHED;
5803 } while (cmpxchg(&n->state, val, new) != val);
5804
5805 if (unlikely(val & NAPIF_STATE_MISSED)) {
5806 __napi_schedule(n);
5807 return false;
5808 }
5809
364b6055 5810 return true;
d565b0a1 5811}
3b47d303 5812EXPORT_SYMBOL(napi_complete_done);
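/* Illustrative sketch, not part of net/core/dev.c: the completion side of a
 * poll routine. When the budget was not exhausted the driver calls
 * napi_complete_done(), and re-enables its RX interrupt only if that call
 * returns true (busy polling may still own the instance). The foo_*
 * helpers are hypothetical.
 */
#include <linux/netdevice.h>

static int foo_clean_rx_ring(struct napi_struct *napi, int budget);	/* hypothetical */
static void foo_enable_rx_irq(struct napi_struct *napi);		/* hypothetical */

static int foo_poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx_ring(napi, budget);	/* always <= budget */

	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_enable_rx_irq(napi);

	return work_done;
}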
d565b0a1 5813
af12fa6e 5814/* must be called under rcu_read_lock(), as we dont take a reference */
02d62e86 5815static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
5816{
5817 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5818 struct napi_struct *napi;
5819
5820 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5821 if (napi->napi_id == napi_id)
5822 return napi;
5823
5824 return NULL;
5825}
02d62e86
ED
5826
5827#if defined(CONFIG_NET_RX_BUSY_POLL)
217f6974 5828
ce6aea93 5829#define BUSY_POLL_BUDGET 8
217f6974
ED
5830
5831static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5832{
5833 int rc;
5834
39e6c820
ED
5835 /* Busy polling means there is a high chance device driver hard irq
5836 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5837 * set in napi_schedule_prep().
5838 * Since we are about to call napi->poll() once more, we can safely
5839 * clear NAPI_STATE_MISSED.
5840 *
5841 * Note: x86 could use a single "lock and ..." instruction
5842 * to perform these two clear_bit()
5843 */
5844 clear_bit(NAPI_STATE_MISSED, &napi->state);
217f6974
ED
5845 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5846
5847 local_bh_disable();
5848
5849 /* All we really want here is to re-enable device interrupts.
5850 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5851 */
5852 rc = napi->poll(napi, BUSY_POLL_BUDGET);
1e22391e 5853 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
217f6974
ED
5854 netpoll_poll_unlock(have_poll_lock);
5855 if (rc == BUSY_POLL_BUDGET)
5856 __napi_schedule(napi);
5857 local_bh_enable();
217f6974
ED
5858}
5859
7db6b048
SS
5860void napi_busy_loop(unsigned int napi_id,
5861 bool (*loop_end)(void *, unsigned long),
5862 void *loop_end_arg)
02d62e86 5863{
7db6b048 5864 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
217f6974 5865 int (*napi_poll)(struct napi_struct *napi, int budget);
217f6974 5866 void *have_poll_lock = NULL;
02d62e86 5867 struct napi_struct *napi;
217f6974
ED
5868
5869restart:
217f6974 5870 napi_poll = NULL;
02d62e86 5871
2a028ecb 5872 rcu_read_lock();
02d62e86 5873
545cd5e5 5874 napi = napi_by_id(napi_id);
02d62e86
ED
5875 if (!napi)
5876 goto out;
5877
217f6974
ED
5878 preempt_disable();
5879 for (;;) {
2b5cd0df
AD
5880 int work = 0;
5881
2a028ecb 5882 local_bh_disable();
217f6974
ED
5883 if (!napi_poll) {
5884 unsigned long val = READ_ONCE(napi->state);
5885
5886 /* If multiple threads are competing for this napi,
5887 * we avoid dirtying napi->state as much as we can.
5888 */
5889 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5890 NAPIF_STATE_IN_BUSY_POLL))
5891 goto count;
5892 if (cmpxchg(&napi->state, val,
5893 val | NAPIF_STATE_IN_BUSY_POLL |
5894 NAPIF_STATE_SCHED) != val)
5895 goto count;
5896 have_poll_lock = netpoll_poll_lock(napi);
5897 napi_poll = napi->poll;
5898 }
2b5cd0df
AD
5899 work = napi_poll(napi, BUSY_POLL_BUDGET);
5900 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
217f6974 5901count:
2b5cd0df 5902 if (work > 0)
7db6b048 5903 __NET_ADD_STATS(dev_net(napi->dev),
2b5cd0df 5904 LINUX_MIB_BUSYPOLLRXPACKETS, work);
2a028ecb 5905 local_bh_enable();
02d62e86 5906
7db6b048 5907 if (!loop_end || loop_end(loop_end_arg, start_time))
217f6974 5908 break;
02d62e86 5909
217f6974
ED
5910 if (unlikely(need_resched())) {
5911 if (napi_poll)
5912 busy_poll_stop(napi, have_poll_lock);
5913 preempt_enable();
5914 rcu_read_unlock();
5915 cond_resched();
7db6b048 5916 if (loop_end(loop_end_arg, start_time))
2b5cd0df 5917 return;
217f6974
ED
5918 goto restart;
5919 }
6cdf89b1 5920 cpu_relax();
217f6974
ED
5921 }
5922 if (napi_poll)
5923 busy_poll_stop(napi, have_poll_lock);
5924 preempt_enable();
02d62e86 5925out:
2a028ecb 5926 rcu_read_unlock();
02d62e86 5927}
7db6b048 5928EXPORT_SYMBOL(napi_busy_loop);
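/* Illustrative sketch, not part of net/core/dev.c: napi_busy_loop() keeps
 * polling the NAPI instance identified by @napi_id until the loop_end
 * callback returns true. The callback below simply relies on the
 * sysctl-driven busy_loop_timeout() from <net/busy_poll.h>, similar to what
 * the socket busy-poll path does; it requires CONFIG_NET_RX_BUSY_POLL, and
 * the foo_* names are assumptions for the example.
 */
#include <linux/netdevice.h>
#include <net/busy_poll.h>

static bool foo_loop_end(void *arg, unsigned long start_time)
{
	/* true once the net.core.busy_poll budget has elapsed */
	return busy_loop_timeout(start_time);
}

static void foo_busy_poll_sketch(unsigned int napi_id)
{
	napi_busy_loop(napi_id, foo_loop_end, NULL);
}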
02d62e86
ED
5929
5930#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e 5931
149d6ad8 5932static void napi_hash_add(struct napi_struct *napi)
af12fa6e 5933{
d64b5e85
ED
5934 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5935 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
52bd2d62 5936 return;
af12fa6e 5937
52bd2d62 5938 spin_lock(&napi_hash_lock);
af12fa6e 5939
545cd5e5 5940 /* 0..NR_CPUS range is reserved for sender_cpu use */
52bd2d62 5941 do {
545cd5e5
AD
5942 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5943 napi_gen_id = MIN_NAPI_ID;
52bd2d62
ED
5944 } while (napi_by_id(napi_gen_id));
5945 napi->napi_id = napi_gen_id;
af12fa6e 5946
52bd2d62
ED
5947 hlist_add_head_rcu(&napi->napi_hash_node,
5948 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 5949
52bd2d62 5950 spin_unlock(&napi_hash_lock);
af12fa6e 5951}
af12fa6e
ET
5952
5953/* Warning : the caller is responsible for making sure the rcu grace period
5954 * is respected before freeing memory containing @napi
5955 */
34cbe27e 5956bool napi_hash_del(struct napi_struct *napi)
af12fa6e 5957{
34cbe27e
ED
5958 bool rcu_sync_needed = false;
5959
af12fa6e
ET
5960 spin_lock(&napi_hash_lock);
5961
34cbe27e
ED
5962 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5963 rcu_sync_needed = true;
af12fa6e 5964 hlist_del_rcu(&napi->napi_hash_node);
34cbe27e 5965 }
af12fa6e 5966 spin_unlock(&napi_hash_lock);
34cbe27e 5967 return rcu_sync_needed;
af12fa6e
ET
5968}
5969EXPORT_SYMBOL_GPL(napi_hash_del);
5970
3b47d303
ED
5971static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5972{
5973 struct napi_struct *napi;
5974
5975 napi = container_of(timer, struct napi_struct, timer);
39e6c820
ED
5976
5977 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
5978 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5979 */
07d78363 5980 if (napi->gro_count && !napi_disable_pending(napi) &&
39e6c820
ED
5981 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5982 __napi_schedule_irqoff(napi);
3b47d303
ED
5983
5984 return HRTIMER_NORESTART;
5985}
5986
d565b0a1
HX
5987void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5988 int (*poll)(struct napi_struct *, int), int weight)
5989{
07d78363
DM
5990 int i;
5991
d565b0a1 5992 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
5993 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5994 napi->timer.function = napi_watchdog;
4ae5544f 5995 napi->gro_count = 0;
6312fe77
LR
5996 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
5997 INIT_LIST_HEAD(&napi->gro_hash[i].list);
5998 napi->gro_hash[i].count = 0;
5999 }
5d38a079 6000 napi->skb = NULL;
d565b0a1 6001 napi->poll = poll;
82dc3c63
ED
6002 if (weight > NAPI_POLL_WEIGHT)
6003 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
6004 weight, dev->name);
d565b0a1
HX
6005 napi->weight = weight;
6006 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 6007 napi->dev = dev;
5d38a079 6008#ifdef CONFIG_NETPOLL
d565b0a1
HX
6009 napi->poll_owner = -1;
6010#endif
6011 set_bit(NAPI_STATE_SCHED, &napi->state);
93d05d4a 6012 napi_hash_add(napi);
d565b0a1
HX
6013}
6014EXPORT_SYMBOL(netif_napi_add);
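/* Illustrative sketch, not part of net/core/dev.c: registering a NAPI
 * instance at probe time. netif_napi_add() binds the poll routine and
 * weight to the net_device; the instance only starts running after
 * napi_enable(), typically called from ndo_open. foo_poll() is a
 * hypothetical poll routine.
 */
#include <linux/netdevice.h>

static int foo_poll(struct napi_struct *napi, int budget);	/* hypothetical */

static void foo_setup_napi_sketch(struct net_device *netdev,
				  struct napi_struct *napi)
{
	netif_napi_add(netdev, napi, foo_poll, NAPI_POLL_WEIGHT);
	/* pair with napi_enable() in ndo_open and napi_disable() +
	 * netif_napi_del() on the teardown path.
	 */
}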
6015
3b47d303
ED
6016void napi_disable(struct napi_struct *n)
6017{
6018 might_sleep();
6019 set_bit(NAPI_STATE_DISABLE, &n->state);
6020
6021 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6022 msleep(1);
2d8bff12
NH
6023 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6024 msleep(1);
3b47d303
ED
6025
6026 hrtimer_cancel(&n->timer);
6027
6028 clear_bit(NAPI_STATE_DISABLE, &n->state);
6029}
6030EXPORT_SYMBOL(napi_disable);
6031
07d78363 6032static void flush_gro_hash(struct napi_struct *napi)
d4546c25 6033{
07d78363 6034 int i;
d4546c25 6035
07d78363
DM
6036 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6037 struct sk_buff *skb, *n;
6038
6312fe77 6039 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
07d78363 6040 kfree_skb(skb);
6312fe77 6041 napi->gro_hash[i].count = 0;
07d78363 6042 }
d4546c25
DM
6043}
6044
93d05d4a 6045/* Must be called in process context */
d565b0a1
HX
6046void netif_napi_del(struct napi_struct *napi)
6047{
93d05d4a
ED
6048 might_sleep();
6049 if (napi_hash_del(napi))
6050 synchronize_net();
d7b06636 6051 list_del_init(&napi->dev_list);
76620aaf 6052 napi_free_frags(napi);
d565b0a1 6053
07d78363 6054 flush_gro_hash(napi);
4ae5544f 6055 napi->gro_count = 0;
d565b0a1
HX
6056}
6057EXPORT_SYMBOL(netif_napi_del);
6058
726ce70e
HX
6059static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6060{
6061 void *have;
6062 int work, weight;
6063
6064 list_del_init(&n->poll_list);
6065
6066 have = netpoll_poll_lock(n);
6067
6068 weight = n->weight;
6069
6070 /* This NAPI_STATE_SCHED test is for avoiding a race
6071 * with netpoll's poll_napi(). Only the entity which
6072 * obtains the lock and sees NAPI_STATE_SCHED set will
6073 * actually make the ->poll() call. Therefore we avoid
6074 * accidentally calling ->poll() when NAPI is not scheduled.
6075 */
6076 work = 0;
6077 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6078 work = n->poll(n, weight);
1db19db7 6079 trace_napi_poll(n, work, weight);
726ce70e
HX
6080 }
6081
6082 WARN_ON_ONCE(work > weight);
6083
6084 if (likely(work < weight))
6085 goto out_unlock;
6086
6087 /* Drivers must not modify the NAPI state if they
6088 * consume the entire weight. In such cases this code
6089 * still "owns" the NAPI instance and therefore can
6090 * move the instance around on the list at-will.
6091 */
6092 if (unlikely(napi_disable_pending(n))) {
6093 napi_complete(n);
6094 goto out_unlock;
6095 }
6096
07d78363 6097 if (n->gro_count) {
726ce70e
HX
6098 /* flush too old packets
6099 * If HZ < 1000, flush all packets.
6100 */
6101 napi_gro_flush(n, HZ >= 1000);
6102 }
6103
001ce546
HX
6104 /* Some drivers may have called napi_schedule
6105 * prior to exhausting their budget.
6106 */
6107 if (unlikely(!list_empty(&n->poll_list))) {
6108 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6109 n->dev ? n->dev->name : "backlog");
6110 goto out_unlock;
6111 }
6112
726ce70e
HX
6113 list_add_tail(&n->poll_list, repoll);
6114
6115out_unlock:
6116 netpoll_poll_unlock(have);
6117
6118 return work;
6119}
6120
0766f788 6121static __latent_entropy void net_rx_action(struct softirq_action *h)
1da177e4 6122{
903ceff7 6123 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7acf8a1e
MW
6124 unsigned long time_limit = jiffies +
6125 usecs_to_jiffies(netdev_budget_usecs);
51b0bded 6126 int budget = netdev_budget;
d75b1ade
ED
6127 LIST_HEAD(list);
6128 LIST_HEAD(repoll);
53fb95d3 6129
1da177e4 6130 local_irq_disable();
d75b1ade
ED
6131 list_splice_init(&sd->poll_list, &list);
6132 local_irq_enable();
1da177e4 6133
ceb8d5bf 6134 for (;;) {
bea3348e 6135 struct napi_struct *n;
1da177e4 6136
ceb8d5bf
HX
6137 if (list_empty(&list)) {
6138 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
f52dffe0 6139 goto out;
ceb8d5bf
HX
6140 break;
6141 }
6142
6bd373eb
HX
6143 n = list_first_entry(&list, struct napi_struct, poll_list);
6144 budget -= napi_poll(n, &repoll);
6145
d75b1ade 6146 /* If softirq window is exhausted then punt.
24f8b238
SH
6147 * Allow this to run for 2 jiffies, which will allow
6148 * an average latency of 1.5/HZ.
bea3348e 6149 */
ceb8d5bf
HX
6150 if (unlikely(budget <= 0 ||
6151 time_after_eq(jiffies, time_limit))) {
6152 sd->time_squeeze++;
6153 break;
6154 }
1da177e4 6155 }
d75b1ade 6156
d75b1ade
ED
6157 local_irq_disable();
6158
6159 list_splice_tail_init(&sd->poll_list, &list);
6160 list_splice_tail(&repoll, &list);
6161 list_splice(&list, &sd->poll_list);
6162 if (!list_empty(&sd->poll_list))
6163 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6164
e326bed2 6165 net_rps_action_and_irq_enable(sd);
f52dffe0
ED
6166out:
6167 __kfree_skb_flush();
1da177e4
LT
6168}
6169
aa9d8560 6170struct netdev_adjacent {
9ff162a8 6171 struct net_device *dev;
5d261913
VF
6172
6173 /* upper master flag, there can only be one master device per list */
9ff162a8 6174 bool master;
5d261913 6175
5d261913
VF
6176 /* counter for the number of times this device was added to us */
6177 u16 ref_nr;
6178
402dae96
VF
6179 /* private field for the users */
6180 void *private;
6181
9ff162a8
JP
6182 struct list_head list;
6183 struct rcu_head rcu;
9ff162a8
JP
6184};
6185
6ea29da1 6186static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 6187 struct list_head *adj_list)
9ff162a8 6188{
5d261913 6189 struct netdev_adjacent *adj;
5d261913 6190
2f268f12 6191 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
6192 if (adj->dev == adj_dev)
6193 return adj;
9ff162a8
JP
6194 }
6195 return NULL;
6196}
6197
f1170fd4
DA
6198static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6199{
6200 struct net_device *dev = data;
6201
6202 return upper_dev == dev;
6203}
6204
9ff162a8
JP
6205/**
6206 * netdev_has_upper_dev - Check if device is linked to an upper device
6207 * @dev: device
6208 * @upper_dev: upper device to check
6209 *
6210 * Find out if a device is linked to the specified upper device and return true
6211 * in case it is. Note that this checks only the immediate upper device,
6212 * not through a complete stack of devices. The caller must hold the RTNL lock.
6213 */
6214bool netdev_has_upper_dev(struct net_device *dev,
6215 struct net_device *upper_dev)
6216{
6217 ASSERT_RTNL();
6218
f1170fd4
DA
6219 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6220 upper_dev);
9ff162a8
JP
6221}
6222EXPORT_SYMBOL(netdev_has_upper_dev);
6223
1a3f060c
DA
6224/**
6225 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6226 * @dev: device
6227 * @upper_dev: upper device to check
6228 *
6229 * Find out if a device is linked to the specified upper device and return true
6230 * in case it is. Note that this checks the entire upper device chain.
6231 * The caller must hold the RCU read lock.
6232 */
6233
1a3f060c
DA
6234bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6235 struct net_device *upper_dev)
6236{
6237 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6238 upper_dev);
6239}
6240EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6241
9ff162a8
JP
6242/**
6243 * netdev_has_any_upper_dev - Check if device is linked to some device
6244 * @dev: device
6245 *
6246 * Find out if a device is linked to an upper device and return true in case
6247 * it is. The caller must hold the RTNL lock.
6248 */
25cc72a3 6249bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
6250{
6251 ASSERT_RTNL();
6252
f1170fd4 6253 return !list_empty(&dev->adj_list.upper);
9ff162a8 6254}
25cc72a3 6255EXPORT_SYMBOL(netdev_has_any_upper_dev);
9ff162a8
JP
6256
6257/**
6258 * netdev_master_upper_dev_get - Get master upper device
6259 * @dev: device
6260 *
6261 * Find a master upper device and return pointer to it or NULL in case
6262 * it's not there. The caller must hold the RTNL lock.
6263 */
6264struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6265{
aa9d8560 6266 struct netdev_adjacent *upper;
9ff162a8
JP
6267
6268 ASSERT_RTNL();
6269
2f268f12 6270 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
6271 return NULL;
6272
2f268f12 6273 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 6274 struct netdev_adjacent, list);
9ff162a8
JP
6275 if (likely(upper->master))
6276 return upper->dev;
6277 return NULL;
6278}
6279EXPORT_SYMBOL(netdev_master_upper_dev_get);
6280
0f524a80
DA
6281/**
6282 * netdev_has_any_lower_dev - Check if device is linked to some device
6283 * @dev: device
6284 *
6285 * Find out if a device is linked to a lower device and return true in case
6286 * it is. The caller must hold the RTNL lock.
6287 */
6288static bool netdev_has_any_lower_dev(struct net_device *dev)
6289{
6290 ASSERT_RTNL();
6291
6292 return !list_empty(&dev->adj_list.lower);
6293}
6294
b6ccba4c
VF
6295void *netdev_adjacent_get_private(struct list_head *adj_list)
6296{
6297 struct netdev_adjacent *adj;
6298
6299 adj = list_entry(adj_list, struct netdev_adjacent, list);
6300
6301 return adj->private;
6302}
6303EXPORT_SYMBOL(netdev_adjacent_get_private);
6304
44a40855
VY
6305/**
6306 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6307 * @dev: device
6308 * @iter: list_head ** of the current position
6309 *
6310 * Gets the next device from the dev's upper list, starting from iter
6311 * position. The caller must hold RCU read lock.
6312 */
6313struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6314 struct list_head **iter)
6315{
6316 struct netdev_adjacent *upper;
6317
6318 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6319
6320 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6321
6322 if (&upper->list == &dev->adj_list.upper)
6323 return NULL;
6324
6325 *iter = &upper->list;
6326
6327 return upper->dev;
6328}
6329EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6330
1a3f060c
DA
6331static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6332 struct list_head **iter)
6333{
6334 struct netdev_adjacent *upper;
6335
6336 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6337
6338 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6339
6340 if (&upper->list == &dev->adj_list.upper)
6341 return NULL;
6342
6343 *iter = &upper->list;
6344
6345 return upper->dev;
6346}
6347
6348int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6349 int (*fn)(struct net_device *dev,
6350 void *data),
6351 void *data)
6352{
6353 struct net_device *udev;
6354 struct list_head *iter;
6355 int ret;
6356
6357 for (iter = &dev->adj_list.upper,
6358 udev = netdev_next_upper_dev_rcu(dev, &iter);
6359 udev;
6360 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6361 /* first is the upper device itself */
6362 ret = fn(udev, data);
6363 if (ret)
6364 return ret;
6365
6366 /* then look at all of its upper devices */
6367 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6368 if (ret)
6369 return ret;
6370 }
6371
6372 return 0;
6373}
6374EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
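/* Illustrative sketch, not part of net/core/dev.c: using the recursive
 * upper-device walker. The callback is invoked for every device in the
 * upper chain and a non-zero return stops the walk; this one just counts.
 * The walk must run under rcu_read_lock(); foo_* names are assumptions.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int foo_count_upper(struct net_device *upper, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* keep walking */
}

static unsigned int foo_count_all_uppers_sketch(struct net_device *dev)
{
	unsigned int count = 0;

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, foo_count_upper, &count);
	rcu_read_unlock();

	return count;
}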
6375
31088a11
VF
6376/**
6377 * netdev_lower_get_next_private - Get the next ->private from the
6378 * lower neighbour list
6379 * @dev: device
6380 * @iter: list_head ** of the current position
6381 *
6382 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6383 * list, starting from iter position. The caller must either hold the
6384 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 6385 * list will remain unchanged.
31088a11
VF
6386 */
6387void *netdev_lower_get_next_private(struct net_device *dev,
6388 struct list_head **iter)
6389{
6390 struct netdev_adjacent *lower;
6391
6392 lower = list_entry(*iter, struct netdev_adjacent, list);
6393
6394 if (&lower->list == &dev->adj_list.lower)
6395 return NULL;
6396
6859e7df 6397 *iter = lower->list.next;
31088a11
VF
6398
6399 return lower->private;
6400}
6401EXPORT_SYMBOL(netdev_lower_get_next_private);
6402
6403/**
6404 * netdev_lower_get_next_private_rcu - Get the next ->private from the
6405 * lower neighbour list, RCU
6406 * variant
6407 * @dev: device
6408 * @iter: list_head ** of the current position
6409 *
6410 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6411 * list, starting from iter position. The caller must hold RCU read lock.
6412 */
6413void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6414 struct list_head **iter)
6415{
6416 struct netdev_adjacent *lower;
6417
6418 WARN_ON_ONCE(!rcu_read_lock_held());
6419
6420 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6421
6422 if (&lower->list == &dev->adj_list.lower)
6423 return NULL;
6424
6859e7df 6425 *iter = &lower->list;
31088a11
VF
6426
6427 return lower->private;
6428}
6429EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6430
4085ebe8
VY
6431/**
6432 * netdev_lower_get_next - Get the next device from the lower neighbour
6433 * list
6434 * @dev: device
6435 * @iter: list_head ** of the current position
6436 *
6437 * Gets the next netdev_adjacent from the dev's lower neighbour
6438 * list, starting from iter position. The caller must hold RTNL lock or
6439 * its own locking that guarantees that the neighbour lower
b469139e 6440 * list will remain unchanged.
4085ebe8
VY
6441 */
6442void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6443{
6444 struct netdev_adjacent *lower;
6445
cfdd28be 6446 lower = list_entry(*iter, struct netdev_adjacent, list);
4085ebe8
VY
6447
6448 if (&lower->list == &dev->adj_list.lower)
6449 return NULL;
6450
cfdd28be 6451 *iter = lower->list.next;
4085ebe8
VY
6452
6453 return lower->dev;
6454}
6455EXPORT_SYMBOL(netdev_lower_get_next);
6456
1a3f060c
DA
6457static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6458 struct list_head **iter)
6459{
6460 struct netdev_adjacent *lower;
6461
46b5ab1a 6462 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
1a3f060c
DA
6463
6464 if (&lower->list == &dev->adj_list.lower)
6465 return NULL;
6466
46b5ab1a 6467 *iter = &lower->list;
1a3f060c
DA
6468
6469 return lower->dev;
6470}
6471
6472int netdev_walk_all_lower_dev(struct net_device *dev,
6473 int (*fn)(struct net_device *dev,
6474 void *data),
6475 void *data)
6476{
6477 struct net_device *ldev;
6478 struct list_head *iter;
6479 int ret;
6480
6481 for (iter = &dev->adj_list.lower,
6482 ldev = netdev_next_lower_dev(dev, &iter);
6483 ldev;
6484 ldev = netdev_next_lower_dev(dev, &iter)) {
6485 /* first is the lower device itself */
6486 ret = fn(ldev, data);
6487 if (ret)
6488 return ret;
6489
6490 /* then look at all of its lower devices */
6491 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6492 if (ret)
6493 return ret;
6494 }
6495
6496 return 0;
6497}
6498EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6499
1a3f060c
DA
6500static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6501 struct list_head **iter)
6502{
6503 struct netdev_adjacent *lower;
6504
6505 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6506 if (&lower->list == &dev->adj_list.lower)
6507 return NULL;
6508
6509 *iter = &lower->list;
6510
6511 return lower->dev;
6512}
6513
6514int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6515 int (*fn)(struct net_device *dev,
6516 void *data),
6517 void *data)
6518{
6519 struct net_device *ldev;
6520 struct list_head *iter;
6521 int ret;
6522
6523 for (iter = &dev->adj_list.lower,
6524 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6525 ldev;
6526 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6527 /* first is the lower device itself */
6528 ret = fn(ldev, data);
6529 if (ret)
6530 return ret;
6531
6532 /* then look at all of its lower devices */
6533 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6534 if (ret)
6535 return ret;
6536 }
6537
6538 return 0;
6539}
6540EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6541
e001bfad 6542/**
6543 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6544 * lower neighbour list, RCU
6545 * variant
6546 * @dev: device
6547 *
6548 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6549 * list. The caller must hold RCU read lock.
6550 */
6551void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6552{
6553 struct netdev_adjacent *lower;
6554
6555 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6556 struct netdev_adjacent, list);
6557 if (lower)
6558 return lower->private;
6559 return NULL;
6560}
6561EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6562
9ff162a8
JP
6563/**
6564 * netdev_master_upper_dev_get_rcu - Get master upper device
6565 * @dev: device
6566 *
6567 * Find a master upper device and return pointer to it or NULL in case
6568 * it's not there. The caller must hold the RCU read lock.
6569 */
6570struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6571{
aa9d8560 6572 struct netdev_adjacent *upper;
9ff162a8 6573
2f268f12 6574 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 6575 struct netdev_adjacent, list);
9ff162a8
JP
6576 if (upper && likely(upper->master))
6577 return upper->dev;
6578 return NULL;
6579}
6580EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6581
0a59f3a9 6582static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
6583 struct net_device *adj_dev,
6584 struct list_head *dev_list)
6585{
6586 char linkname[IFNAMSIZ+7];
f4563a75 6587
3ee32707
VF
6588 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6589 "upper_%s" : "lower_%s", adj_dev->name);
6590 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6591 linkname);
6592}
0a59f3a9 6593static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
6594 char *name,
6595 struct list_head *dev_list)
6596{
6597 char linkname[IFNAMSIZ+7];
f4563a75 6598
3ee32707
VF
6599 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6600 "upper_%s" : "lower_%s", name);
6601 sysfs_remove_link(&(dev->dev.kobj), linkname);
6602}
6603
7ce64c79
AF
6604static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6605 struct net_device *adj_dev,
6606 struct list_head *dev_list)
6607{
6608 return (dev_list == &dev->adj_list.upper ||
6609 dev_list == &dev->adj_list.lower) &&
6610 net_eq(dev_net(dev), dev_net(adj_dev));
6611}
3ee32707 6612
5d261913
VF
6613static int __netdev_adjacent_dev_insert(struct net_device *dev,
6614 struct net_device *adj_dev,
7863c054 6615 struct list_head *dev_list,
402dae96 6616 void *private, bool master)
5d261913
VF
6617{
6618 struct netdev_adjacent *adj;
842d67a7 6619 int ret;
5d261913 6620
6ea29da1 6621 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
6622
6623 if (adj) {
790510d9 6624 adj->ref_nr += 1;
67b62f98
DA
6625 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6626 dev->name, adj_dev->name, adj->ref_nr);
6627
5d261913
VF
6628 return 0;
6629 }
6630
6631 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6632 if (!adj)
6633 return -ENOMEM;
6634
6635 adj->dev = adj_dev;
6636 adj->master = master;
790510d9 6637 adj->ref_nr = 1;
402dae96 6638 adj->private = private;
5d261913 6639 dev_hold(adj_dev);
2f268f12 6640
67b62f98
DA
6641 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6642 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
5d261913 6643
7ce64c79 6644 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 6645 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
6646 if (ret)
6647 goto free_adj;
6648 }
6649
7863c054 6650 /* Ensure that master link is always the first item in list. */
842d67a7
VF
6651 if (master) {
6652 ret = sysfs_create_link(&(dev->dev.kobj),
6653 &(adj_dev->dev.kobj), "master");
6654 if (ret)
5831d66e 6655 goto remove_symlinks;
842d67a7 6656
7863c054 6657 list_add_rcu(&adj->list, dev_list);
842d67a7 6658 } else {
7863c054 6659 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 6660 }
5d261913
VF
6661
6662 return 0;
842d67a7 6663
5831d66e 6664remove_symlinks:
7ce64c79 6665 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6666 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
6667free_adj:
6668 kfree(adj);
974daef7 6669 dev_put(adj_dev);
842d67a7
VF
6670
6671 return ret;
5d261913
VF
6672}
6673
1d143d9f 6674static void __netdev_adjacent_dev_remove(struct net_device *dev,
6675 struct net_device *adj_dev,
93409033 6676 u16 ref_nr,
1d143d9f 6677 struct list_head *dev_list)
5d261913
VF
6678{
6679 struct netdev_adjacent *adj;
6680
67b62f98
DA
6681 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6682 dev->name, adj_dev->name, ref_nr);
6683
6ea29da1 6684 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 6685
2f268f12 6686 if (!adj) {
67b62f98 6687 pr_err("Adjacency does not exist for device %s from %s\n",
2f268f12 6688 dev->name, adj_dev->name);
67b62f98
DA
6689 WARN_ON(1);
6690 return;
2f268f12 6691 }
5d261913 6692
93409033 6693 if (adj->ref_nr > ref_nr) {
67b62f98
DA
6694 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6695 dev->name, adj_dev->name, ref_nr,
6696 adj->ref_nr - ref_nr);
93409033 6697 adj->ref_nr -= ref_nr;
5d261913
VF
6698 return;
6699 }
6700
842d67a7
VF
6701 if (adj->master)
6702 sysfs_remove_link(&(dev->dev.kobj), "master");
6703
7ce64c79 6704 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 6705 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 6706
5d261913 6707 list_del_rcu(&adj->list);
67b62f98 6708 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
2f268f12 6709 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
6710 dev_put(adj_dev);
6711 kfree_rcu(adj, rcu);
6712}
6713
1d143d9f 6714static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6715 struct net_device *upper_dev,
6716 struct list_head *up_list,
6717 struct list_head *down_list,
6718 void *private, bool master)
5d261913
VF
6719{
6720 int ret;
6721
790510d9 6722 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
93409033 6723 private, master);
5d261913
VF
6724 if (ret)
6725 return ret;
6726
790510d9 6727 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
93409033 6728 private, false);
5d261913 6729 if (ret) {
790510d9 6730 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
5d261913
VF
6731 return ret;
6732 }
6733
6734 return 0;
6735}
6736
1d143d9f 6737static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6738 struct net_device *upper_dev,
93409033 6739 u16 ref_nr,
1d143d9f 6740 struct list_head *up_list,
6741 struct list_head *down_list)
5d261913 6742{
93409033
AC
6743 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6744 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
5d261913
VF
6745}
6746
1d143d9f 6747static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6748 struct net_device *upper_dev,
6749 void *private, bool master)
2f268f12 6750{
f1170fd4
DA
6751 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6752 &dev->adj_list.upper,
6753 &upper_dev->adj_list.lower,
6754 private, master);
5d261913
VF
6755}
6756
1d143d9f 6757static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6758 struct net_device *upper_dev)
2f268f12 6759{
93409033 6760 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
2f268f12
VF
6761 &dev->adj_list.upper,
6762 &upper_dev->adj_list.lower);
6763}
5d261913 6764
9ff162a8 6765static int __netdev_upper_dev_link(struct net_device *dev,
402dae96 6766 struct net_device *upper_dev, bool master,
42ab19ee
DA
6767 void *upper_priv, void *upper_info,
6768 struct netlink_ext_ack *extack)
9ff162a8 6769{
51d0c047
DA
6770 struct netdev_notifier_changeupper_info changeupper_info = {
6771 .info = {
6772 .dev = dev,
42ab19ee 6773 .extack = extack,
51d0c047
DA
6774 },
6775 .upper_dev = upper_dev,
6776 .master = master,
6777 .linking = true,
6778 .upper_info = upper_info,
6779 };
50d629e7 6780 struct net_device *master_dev;
5d261913 6781 int ret = 0;
9ff162a8
JP
6782
6783 ASSERT_RTNL();
6784
6785 if (dev == upper_dev)
6786 return -EBUSY;
6787
6788 /* To prevent loops, check if dev is not upper device to upper_dev. */
f1170fd4 6789 if (netdev_has_upper_dev(upper_dev, dev))
9ff162a8
JP
6790 return -EBUSY;
6791
50d629e7
MM
6792 if (!master) {
6793 if (netdev_has_upper_dev(dev, upper_dev))
6794 return -EEXIST;
6795 } else {
6796 master_dev = netdev_master_upper_dev_get(dev);
6797 if (master_dev)
6798 return master_dev == upper_dev ? -EEXIST : -EBUSY;
6799 }
9ff162a8 6800
51d0c047 6801 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6802 &changeupper_info.info);
6803 ret = notifier_to_errno(ret);
6804 if (ret)
6805 return ret;
6806
6dffb044 6807 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
402dae96 6808 master);
5d261913
VF
6809 if (ret)
6810 return ret;
9ff162a8 6811
51d0c047 6812 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
b03804e7
IS
6813 &changeupper_info.info);
6814 ret = notifier_to_errno(ret);
6815 if (ret)
f1170fd4 6816 goto rollback;
b03804e7 6817
9ff162a8 6818 return 0;
5d261913 6819
f1170fd4 6820rollback:
2f268f12 6821 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
6822
6823 return ret;
9ff162a8
JP
6824}
6825
6826/**
6827 * netdev_upper_dev_link - Add a link to the upper device
6828 * @dev: device
6829 * @upper_dev: new upper device
7a006d59 6830 * @extack: netlink extended ack
9ff162a8
JP
6831 *
6832 * Adds a link to device which is upper to this one. The caller must hold
6833 * the RTNL lock. On a failure a negative errno code is returned.
6834 * On success the reference counts are adjusted and the function
6835 * returns zero.
6836 */
6837int netdev_upper_dev_link(struct net_device *dev,
42ab19ee
DA
6838 struct net_device *upper_dev,
6839 struct netlink_ext_ack *extack)
9ff162a8 6840{
42ab19ee
DA
6841 return __netdev_upper_dev_link(dev, upper_dev, false,
6842 NULL, NULL, extack);
9ff162a8
JP
6843}
6844EXPORT_SYMBOL(netdev_upper_dev_link);
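/* Illustrative sketch, not part of net/core/dev.c: a stacked "foo" virtual
 * device recording that it sits on top of a lower device. The link is made
 * with netdev_upper_dev_link() under RTNL and undone on teardown with
 * netdev_upper_dev_unlink(); foo_dev/lower_dev and the surrounding error
 * handling are assumptions for the example.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int foo_stack_on_lower_sketch(struct net_device *foo_dev,
				     struct net_device *lower_dev,
				     struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();

	/* lower_dev gains foo_dev as an upper; notifiers see (PRE)CHANGEUPPER */
	err = netdev_upper_dev_link(lower_dev, foo_dev, extack);
	if (err)
		return err;

	/* on teardown: netdev_upper_dev_unlink(lower_dev, foo_dev); */
	return 0;
}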
6845
6846/**
6847 * netdev_master_upper_dev_link - Add a master link to the upper device
6848 * @dev: device
6849 * @upper_dev: new upper device
6dffb044 6850 * @upper_priv: upper device private
29bf24af 6851 * @upper_info: upper info to be passed down via notifier
7a006d59 6852 * @extack: netlink extended ack
9ff162a8
JP
6853 *
6854 * Adds a link to device which is upper to this one. In this case, only
6855 * one master upper device can be linked, although other non-master devices
6856 * might be linked as well. The caller must hold the RTNL lock.
6857 * On a failure a negative errno code is returned. On success the reference
6858 * counts are adjusted and the function returns zero.
6859 */
6860int netdev_master_upper_dev_link(struct net_device *dev,
6dffb044 6861 struct net_device *upper_dev,
42ab19ee
DA
6862 void *upper_priv, void *upper_info,
6863 struct netlink_ext_ack *extack)
9ff162a8 6864{
29bf24af 6865 return __netdev_upper_dev_link(dev, upper_dev, true,
42ab19ee 6866 upper_priv, upper_info, extack);
9ff162a8
JP
6867}
6868EXPORT_SYMBOL(netdev_master_upper_dev_link);
6869
6870/**
6871 * netdev_upper_dev_unlink - Removes a link to upper device
6872 * @dev: device
6873 * @upper_dev: new upper device
6874 *
6875 * Removes a link to device which is upper to this one. The caller must hold
6876 * the RTNL lock.
6877 */
6878void netdev_upper_dev_unlink(struct net_device *dev,
6879 struct net_device *upper_dev)
6880{
51d0c047
DA
6881 struct netdev_notifier_changeupper_info changeupper_info = {
6882 .info = {
6883 .dev = dev,
6884 },
6885 .upper_dev = upper_dev,
6886 .linking = false,
6887 };
f4563a75 6888
9ff162a8
JP
6889 ASSERT_RTNL();
6890
0e4ead9d 6891 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
0e4ead9d 6892
51d0c047 6893 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
6894 &changeupper_info.info);
6895
2f268f12 6896 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913 6897
51d0c047 6898 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
0e4ead9d 6899 &changeupper_info.info);
9ff162a8
JP
6900}
6901EXPORT_SYMBOL(netdev_upper_dev_unlink);
6902
61bd3857
MS
6903/**
6904 * netdev_bonding_info_change - Dispatch event about slave change
6905 * @dev: device
4a26e453 6906 * @bonding_info: info to dispatch
61bd3857
MS
6907 *
6908 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6909 * The caller must hold the RTNL lock.
6910 */
6911void netdev_bonding_info_change(struct net_device *dev,
6912 struct netdev_bonding_info *bonding_info)
6913{
51d0c047
DA
6914 struct netdev_notifier_bonding_info info = {
6915 .info.dev = dev,
6916 };
61bd3857
MS
6917
6918 memcpy(&info.bonding_info, bonding_info,
6919 sizeof(struct netdev_bonding_info));
51d0c047 6920 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
61bd3857
MS
6921 &info.info);
6922}
6923EXPORT_SYMBOL(netdev_bonding_info_change);
6924
2ce1ee17 6925static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
6926{
6927 struct netdev_adjacent *iter;
6928
6929 struct net *net = dev_net(dev);
6930
6931 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6932 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6933 continue;
6934 netdev_adjacent_sysfs_add(iter->dev, dev,
6935 &iter->dev->adj_list.lower);
6936 netdev_adjacent_sysfs_add(dev, iter->dev,
6937 &dev->adj_list.upper);
6938 }
6939
6940 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6941 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6942 continue;
6943 netdev_adjacent_sysfs_add(iter->dev, dev,
6944 &iter->dev->adj_list.upper);
6945 netdev_adjacent_sysfs_add(dev, iter->dev,
6946 &dev->adj_list.lower);
6947 }
6948}
6949
2ce1ee17 6950static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
6951{
6952 struct netdev_adjacent *iter;
6953
6954 struct net *net = dev_net(dev);
6955
6956 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6957 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6958 continue;
6959 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6960 &iter->dev->adj_list.lower);
6961 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6962 &dev->adj_list.upper);
6963 }
6964
6965 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6966 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
6967 continue;
6968 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6969 &iter->dev->adj_list.upper);
6970 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6971 &dev->adj_list.lower);
6972 }
6973}
6974
5bb025fa 6975void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 6976{
5bb025fa 6977 struct netdev_adjacent *iter;
402dae96 6978
4c75431a
AF
6979 struct net *net = dev_net(dev);
6980
5bb025fa 6981 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 6982 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6983 continue;
5bb025fa
VF
6984 netdev_adjacent_sysfs_del(iter->dev, oldname,
6985 &iter->dev->adj_list.lower);
6986 netdev_adjacent_sysfs_add(iter->dev, dev,
6987 &iter->dev->adj_list.lower);
6988 }
402dae96 6989
5bb025fa 6990 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 6991 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 6992 continue;
5bb025fa
VF
6993 netdev_adjacent_sysfs_del(iter->dev, oldname,
6994 &iter->dev->adj_list.upper);
6995 netdev_adjacent_sysfs_add(iter->dev, dev,
6996 &iter->dev->adj_list.upper);
6997 }
402dae96 6998}
402dae96
VF
6999
7000void *netdev_lower_dev_get_private(struct net_device *dev,
7001 struct net_device *lower_dev)
7002{
7003 struct netdev_adjacent *lower;
7004
7005 if (!lower_dev)
7006 return NULL;
6ea29da1 7007 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
7008 if (!lower)
7009 return NULL;
7010
7011 return lower->private;
7012}
7013EXPORT_SYMBOL(netdev_lower_dev_get_private);
7014
4085ebe8 7015
952fcfd0 7016int dev_get_nest_level(struct net_device *dev)
4085ebe8
VY
7017{
7018 struct net_device *lower = NULL;
7019 struct list_head *iter;
7020 int max_nest = -1;
7021 int nest;
7022
7023 ASSERT_RTNL();
7024
7025 netdev_for_each_lower_dev(dev, lower, iter) {
952fcfd0 7026 nest = dev_get_nest_level(lower);
4085ebe8
VY
7027 if (max_nest < nest)
7028 max_nest = nest;
7029 }
7030
952fcfd0 7031 return max_nest + 1;
4085ebe8
VY
7032}
7033EXPORT_SYMBOL(dev_get_nest_level);
7034
04d48266
JP
7035/**
 7036 * netdev_lower_state_changed - Dispatch event about lower device state change
7037 * @lower_dev: device
7038 * @lower_state_info: state to dispatch
7039 *
7040 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
7041 * The caller must hold the RTNL lock.
7042 */
7043void netdev_lower_state_changed(struct net_device *lower_dev,
7044 void *lower_state_info)
7045{
51d0c047
DA
7046 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7047 .info.dev = lower_dev,
7048 };
04d48266
JP
7049
7050 ASSERT_RTNL();
7051 changelowerstate_info.lower_state_info = lower_state_info;
51d0c047 7052 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
04d48266
JP
7053 &changelowerstate_info.info);
7054}
7055EXPORT_SYMBOL(netdev_lower_state_changed);
7056
b6c40d68
PM
7057static void dev_change_rx_flags(struct net_device *dev, int flags)
7058{
d314774c
SH
7059 const struct net_device_ops *ops = dev->netdev_ops;
7060
d2615bf4 7061 if (ops->ndo_change_rx_flags)
d314774c 7062 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
7063}
7064
991fb3f7 7065static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 7066{
b536db93 7067 unsigned int old_flags = dev->flags;
d04a48b0
EB
7068 kuid_t uid;
7069 kgid_t gid;
1da177e4 7070
24023451
PM
7071 ASSERT_RTNL();
7072
dad9b335
WC
7073 dev->flags |= IFF_PROMISC;
7074 dev->promiscuity += inc;
7075 if (dev->promiscuity == 0) {
7076 /*
7077 * Avoid overflow.
7078 * If inc causes overflow, untouch promisc and return error.
7079 */
7080 if (inc < 0)
7081 dev->flags &= ~IFF_PROMISC;
7082 else {
7083 dev->promiscuity -= inc;
7b6cd1ce
JP
7084 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7085 dev->name);
dad9b335
WC
7086 return -EOVERFLOW;
7087 }
7088 }
52609c0b 7089 if (dev->flags != old_flags) {
7b6cd1ce
JP
7090 pr_info("device %s %s promiscuous mode\n",
7091 dev->name,
7092 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
7093 if (audit_enabled) {
7094 current_uid_gid(&uid, &gid);
cdfb6b34
RGB
7095 audit_log(audit_context(), GFP_ATOMIC,
7096 AUDIT_ANOM_PROMISCUOUS,
7097 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7098 dev->name, (dev->flags & IFF_PROMISC),
7099 (old_flags & IFF_PROMISC),
7100 from_kuid(&init_user_ns, audit_get_loginuid(current)),
7101 from_kuid(&init_user_ns, uid),
7102 from_kgid(&init_user_ns, gid),
7103 audit_get_sessionid(current));
8192b0c4 7104 }
24023451 7105
b6c40d68 7106 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 7107 }
991fb3f7
ND
7108 if (notify)
7109 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 7110 return 0;
1da177e4
LT
7111}
7112
4417da66
PM
7113/**
7114 * dev_set_promiscuity - update promiscuity count on a device
7115 * @dev: device
7116 * @inc: modifier
7117 *
7118 * Add or remove promiscuity from a device. While the count in the device
7119 * remains above zero the interface remains promiscuous. Once it hits zero
 7120 * the device reverts to normal filtering operation. A negative inc
7121 * value is used to drop promiscuity on the device.
dad9b335 7122 * Return 0 if successful or a negative errno code on error.
4417da66 7123 */
dad9b335 7124int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 7125{
b536db93 7126 unsigned int old_flags = dev->flags;
dad9b335 7127 int err;
4417da66 7128
991fb3f7 7129 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 7130 if (err < 0)
dad9b335 7131 return err;
4417da66
PM
7132 if (dev->flags != old_flags)
7133 dev_set_rx_mode(dev);
dad9b335 7134 return err;
4417da66 7135}
d1b19dff 7136EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 7137
991fb3f7 7138static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 7139{
991fb3f7 7140 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 7141
24023451
PM
7142 ASSERT_RTNL();
7143
1da177e4 7144 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
7145 dev->allmulti += inc;
7146 if (dev->allmulti == 0) {
7147 /*
7148 * Avoid overflow.
7149 * If inc causes overflow, untouch allmulti and return error.
7150 */
7151 if (inc < 0)
7152 dev->flags &= ~IFF_ALLMULTI;
7153 else {
7154 dev->allmulti -= inc;
7b6cd1ce
JP
7155 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
7156 dev->name);
dad9b335
WC
7157 return -EOVERFLOW;
7158 }
7159 }
24023451 7160 if (dev->flags ^ old_flags) {
b6c40d68 7161 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 7162 dev_set_rx_mode(dev);
991fb3f7
ND
7163 if (notify)
7164 __dev_notify_flags(dev, old_flags,
7165 dev->gflags ^ old_gflags);
24023451 7166 }
dad9b335 7167 return 0;
4417da66 7168}
991fb3f7
ND
7169
7170/**
7171 * dev_set_allmulti - update allmulti count on a device
7172 * @dev: device
7173 * @inc: modifier
7174 *
7175 * Add or remove reception of all multicast frames to a device. While the
7176 * count in the device remains above zero the interface remains listening
 7177 * to all multicast frames. Once it hits zero the device reverts to normal
7178 * filtering operation. A negative @inc value is used to drop the counter
7179 * when releasing a resource needing all multicasts.
7180 * Return 0 if successful or a negative errno code on error.
7181 */
7182
7183int dev_set_allmulti(struct net_device *dev, int inc)
7184{
7185 return __dev_set_allmulti(dev, inc, true);
7186}
d1b19dff 7187EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
7188
7189/*
7190 * Upload unicast and multicast address lists to device and
7191 * configure RX filtering. When the device doesn't support unicast
53ccaae1 7192 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
7193 * are present.
7194 */
7195void __dev_set_rx_mode(struct net_device *dev)
7196{
d314774c
SH
7197 const struct net_device_ops *ops = dev->netdev_ops;
7198
4417da66
PM
7199 /* dev_open will call this function so the list will stay sane. */
7200 if (!(dev->flags&IFF_UP))
7201 return;
7202
7203 if (!netif_device_present(dev))
40b77c94 7204 return;
4417da66 7205
01789349 7206 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
7207 /* Unicast addresses changes may only happen under the rtnl,
7208 * therefore calling __dev_set_promiscuity here is safe.
7209 */
32e7bfc4 7210 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 7211 __dev_set_promiscuity(dev, 1, false);
2d348d1f 7212 dev->uc_promisc = true;
32e7bfc4 7213 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 7214 __dev_set_promiscuity(dev, -1, false);
2d348d1f 7215 dev->uc_promisc = false;
4417da66 7216 }
4417da66 7217 }
01789349
JP
7218
7219 if (ops->ndo_set_rx_mode)
7220 ops->ndo_set_rx_mode(dev);
4417da66
PM
7221}
7222
7223void dev_set_rx_mode(struct net_device *dev)
7224{
b9e40857 7225 netif_addr_lock_bh(dev);
4417da66 7226 __dev_set_rx_mode(dev);
b9e40857 7227 netif_addr_unlock_bh(dev);
1da177e4
LT
7228}
7229
f0db275a
SH
7230/**
7231 * dev_get_flags - get flags reported to userspace
7232 * @dev: device
7233 *
7234 * Get the combination of flag bits exported through APIs to userspace.
7235 */
95c96174 7236unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 7237{
95c96174 7238 unsigned int flags;
1da177e4
LT
7239
7240 flags = (dev->flags & ~(IFF_PROMISC |
7241 IFF_ALLMULTI |
b00055aa
SR
7242 IFF_RUNNING |
7243 IFF_LOWER_UP |
7244 IFF_DORMANT)) |
1da177e4
LT
7245 (dev->gflags & (IFF_PROMISC |
7246 IFF_ALLMULTI));
7247
b00055aa
SR
7248 if (netif_running(dev)) {
7249 if (netif_oper_up(dev))
7250 flags |= IFF_RUNNING;
7251 if (netif_carrier_ok(dev))
7252 flags |= IFF_LOWER_UP;
7253 if (netif_dormant(dev))
7254 flags |= IFF_DORMANT;
7255 }
1da177e4
LT
7256
7257 return flags;
7258}
d1b19dff 7259EXPORT_SYMBOL(dev_get_flags);
1da177e4 7260
bd380811 7261int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 7262{
b536db93 7263 unsigned int old_flags = dev->flags;
bd380811 7264 int ret;
1da177e4 7265
24023451
PM
7266 ASSERT_RTNL();
7267
1da177e4
LT
7268 /*
7269 * Set the flags on our device.
7270 */
7271
7272 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
7273 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
7274 IFF_AUTOMEDIA)) |
7275 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
7276 IFF_ALLMULTI));
7277
7278 /*
 7279 * Load in the correct multicast list now that the flags have changed.
7280 */
7281
b6c40d68
PM
7282 if ((old_flags ^ flags) & IFF_MULTICAST)
7283 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 7284
4417da66 7285 dev_set_rx_mode(dev);
1da177e4
LT
7286
7287 /*
 7288 * Have we downed the interface? We handle IFF_UP ourselves
7289 * according to user attempts to set it, rather than blindly
7290 * setting it.
7291 */
7292
7293 ret = 0;
7051b88a 7294 if ((old_flags ^ flags) & IFF_UP) {
7295 if (old_flags & IFF_UP)
7296 __dev_close(dev);
7297 else
7298 ret = __dev_open(dev);
7299 }
1da177e4 7300
1da177e4 7301 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 7302 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 7303 unsigned int old_flags = dev->flags;
d1b19dff 7304
1da177e4 7305 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
7306
7307 if (__dev_set_promiscuity(dev, inc, false) >= 0)
7308 if (dev->flags != old_flags)
7309 dev_set_rx_mode(dev);
1da177e4
LT
7310 }
7311
7312 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
eb13da1a 7313 * is important. Some (broken) drivers set IFF_PROMISC when
 7314 * IFF_ALLMULTI is requested, without asking us and without reporting it.
1da177e4
LT
7315 */
7316 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
7317 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
7318
1da177e4 7319 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 7320 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
7321 }
7322
bd380811
PM
7323 return ret;
7324}
7325
a528c219
ND
7326void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
7327 unsigned int gchanges)
bd380811
PM
7328{
7329 unsigned int changes = dev->flags ^ old_flags;
7330
a528c219 7331 if (gchanges)
7f294054 7332 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 7333
bd380811
PM
7334 if (changes & IFF_UP) {
7335 if (dev->flags & IFF_UP)
7336 call_netdevice_notifiers(NETDEV_UP, dev);
7337 else
7338 call_netdevice_notifiers(NETDEV_DOWN, dev);
7339 }
7340
7341 if (dev->flags & IFF_UP &&
be9efd36 7342 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
51d0c047
DA
7343 struct netdev_notifier_change_info change_info = {
7344 .info = {
7345 .dev = dev,
7346 },
7347 .flags_changed = changes,
7348 };
be9efd36 7349
51d0c047 7350 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
be9efd36 7351 }
bd380811
PM
7352}
7353
7354/**
7355 * dev_change_flags - change device settings
7356 * @dev: device
7357 * @flags: device state flags
7358 *
 7359 * Change settings on a device based on state flags. The flags are
7360 * in the userspace exported format.
7361 */
b536db93 7362int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 7363{
b536db93 7364 int ret;
991fb3f7 7365 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
7366
7367 ret = __dev_change_flags(dev, flags);
7368 if (ret < 0)
7369 return ret;
7370
991fb3f7 7371 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 7372 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
7373 return ret;
7374}
d1b19dff 7375EXPORT_SYMBOL(dev_change_flags);
1da177e4 7376
f51048c3 7377int __dev_set_mtu(struct net_device *dev, int new_mtu)
2315dc91
VF
7378{
7379 const struct net_device_ops *ops = dev->netdev_ops;
7380
7381 if (ops->ndo_change_mtu)
7382 return ops->ndo_change_mtu(dev, new_mtu);
7383
7384 dev->mtu = new_mtu;
7385 return 0;
7386}
f51048c3 7387EXPORT_SYMBOL(__dev_set_mtu);
2315dc91 7388
f0db275a
SH
7389/**
7390 * dev_set_mtu - Change maximum transfer unit
7391 * @dev: device
7392 * @new_mtu: new transfer unit
7393 *
7394 * Change the maximum transfer size of the network device.
7395 */
1da177e4
LT
7396int dev_set_mtu(struct net_device *dev, int new_mtu)
7397{
2315dc91 7398 int err, orig_mtu;
1da177e4
LT
7399
7400 if (new_mtu == dev->mtu)
7401 return 0;
7402
61e84623
JW
7403 /* MTU must be positive, and in range */
7404 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7405 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
7406 dev->name, new_mtu, dev->min_mtu);
1da177e4 7407 return -EINVAL;
61e84623
JW
7408 }
7409
7410 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7411 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
a0e65de7 7412 dev->name, new_mtu, dev->max_mtu);
61e84623
JW
7413 return -EINVAL;
7414 }
1da177e4
LT
7415
7416 if (!netif_device_present(dev))
7417 return -ENODEV;
7418
1d486bfb
VF
7419 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7420 err = notifier_to_errno(err);
7421 if (err)
7422 return err;
d314774c 7423
2315dc91
VF
7424 orig_mtu = dev->mtu;
7425 err = __dev_set_mtu(dev, new_mtu);
d314774c 7426
2315dc91
VF
7427 if (!err) {
7428 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7429 err = notifier_to_errno(err);
7430 if (err) {
7431 /* setting mtu back and notifying everyone again,
7432 * so that they have a chance to revert changes.
7433 */
7434 __dev_set_mtu(dev, orig_mtu);
7435 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7436 }
7437 }
1da177e4
LT
7438 return err;
7439}
d1b19dff 7440EXPORT_SYMBOL(dev_set_mtu);
1da177e4 7441
6a643ddb
CW
7442/**
7443 * dev_change_tx_queue_len - Change TX queue length of a netdevice
7444 * @dev: device
7445 * @new_len: new tx queue length
7446 */
7447int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7448{
7449 unsigned int orig_len = dev->tx_queue_len;
7450 int res;
7451
7452 if (new_len != (unsigned int)new_len)
7453 return -ERANGE;
7454
7455 if (new_len != orig_len) {
7456 dev->tx_queue_len = new_len;
7457 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7458 res = notifier_to_errno(res);
7459 if (res) {
7460 netdev_err(dev,
7461 "refused to change device tx_queue_len\n");
7462 dev->tx_queue_len = orig_len;
7463 return res;
7464 }
48bfd55e 7465 return dev_qdisc_change_tx_queue_len(dev);
6a643ddb
CW
7466 }
7467
7468 return 0;
7469}
7470
cbda10fa
VD
7471/**
7472 * dev_set_group - Change group this device belongs to
7473 * @dev: device
7474 * @new_group: group this device should belong to
7475 */
7476void dev_set_group(struct net_device *dev, int new_group)
7477{
7478 dev->group = new_group;
7479}
7480EXPORT_SYMBOL(dev_set_group);
7481
f0db275a
SH
7482/**
7483 * dev_set_mac_address - Change Media Access Control Address
7484 * @dev: device
7485 * @sa: new address
7486 *
7487 * Change the hardware (MAC) address of the device
7488 */
1da177e4
LT
7489int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
7490{
d314774c 7491 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
7492 int err;
7493
d314774c 7494 if (!ops->ndo_set_mac_address)
1da177e4
LT
7495 return -EOPNOTSUPP;
7496 if (sa->sa_family != dev->type)
7497 return -EINVAL;
7498 if (!netif_device_present(dev))
7499 return -ENODEV;
d314774c 7500 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
7501 if (err)
7502 return err;
fbdeca2d 7503 dev->addr_assign_type = NET_ADDR_SET;
f6521516 7504 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 7505 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 7506 return 0;
1da177e4 7507}
d1b19dff 7508EXPORT_SYMBOL(dev_set_mac_address);
1da177e4 7509
4bf84c35
JP
7510/**
7511 * dev_change_carrier - Change device carrier
7512 * @dev: device
691b3b7e 7513 * @new_carrier: new value
4bf84c35
JP
7514 *
7515 * Change device carrier
7516 */
7517int dev_change_carrier(struct net_device *dev, bool new_carrier)
7518{
7519 const struct net_device_ops *ops = dev->netdev_ops;
7520
7521 if (!ops->ndo_change_carrier)
7522 return -EOPNOTSUPP;
7523 if (!netif_device_present(dev))
7524 return -ENODEV;
7525 return ops->ndo_change_carrier(dev, new_carrier);
7526}
7527EXPORT_SYMBOL(dev_change_carrier);
7528
66b52b0d
JP
7529/**
7530 * dev_get_phys_port_id - Get device physical port ID
7531 * @dev: device
7532 * @ppid: port ID
7533 *
7534 * Get device physical port ID
7535 */
7536int dev_get_phys_port_id(struct net_device *dev,
02637fce 7537 struct netdev_phys_item_id *ppid)
66b52b0d
JP
7538{
7539 const struct net_device_ops *ops = dev->netdev_ops;
7540
7541 if (!ops->ndo_get_phys_port_id)
7542 return -EOPNOTSUPP;
7543 return ops->ndo_get_phys_port_id(dev, ppid);
7544}
7545EXPORT_SYMBOL(dev_get_phys_port_id);
7546
db24a904
DA
7547/**
7548 * dev_get_phys_port_name - Get device physical port name
7549 * @dev: device
7550 * @name: port name
ed49e650 7551 * @len: limit of bytes to copy to name
db24a904
DA
7552 *
7553 * Get device physical port name
7554 */
7555int dev_get_phys_port_name(struct net_device *dev,
7556 char *name, size_t len)
7557{
7558 const struct net_device_ops *ops = dev->netdev_ops;
7559
7560 if (!ops->ndo_get_phys_port_name)
7561 return -EOPNOTSUPP;
7562 return ops->ndo_get_phys_port_name(dev, name, len);
7563}
7564EXPORT_SYMBOL(dev_get_phys_port_name);
7565
d746d707
AK
7566/**
7567 * dev_change_proto_down - update protocol port state information
7568 * @dev: device
7569 * @proto_down: new value
7570 *
7571 * This info can be used by switch drivers to set the phys state of the
7572 * port.
7573 */
7574int dev_change_proto_down(struct net_device *dev, bool proto_down)
7575{
7576 const struct net_device_ops *ops = dev->netdev_ops;
7577
7578 if (!ops->ndo_change_proto_down)
7579 return -EOPNOTSUPP;
7580 if (!netif_device_present(dev))
7581 return -ENODEV;
7582 return ops->ndo_change_proto_down(dev, proto_down);
7583}
7584EXPORT_SYMBOL(dev_change_proto_down);
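/*
 * Hypothetical switch-driver sketch (illustrative example_* names, not from
 * this file): ndo_change_proto_down typically records the state and disables
 * the physical port, so that a routing daemon marking the port "protocol
 * down" also takes the link down. The hardware call is a placeholder.
 */
static int example_change_proto_down(struct net_device *dev, bool proto_down)
{
	struct example_port *port = netdev_priv(dev);	/* assumed priv layout */

	example_hw_set_port_enable(port, !proto_down);	/* hypothetical */
	dev->proto_down = proto_down;
	return 0;
}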
7585
118b4aa2
JK
7586void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7587 struct netdev_bpf *xdp)
d67b9cd2 7588{
118b4aa2
JK
7589 memset(xdp, 0, sizeof(*xdp));
7590 xdp->command = XDP_QUERY_PROG;
d67b9cd2
DB
7591
7592 /* Query must always succeed. */
118b4aa2
JK
7593 WARN_ON(bpf_op(dev, xdp) < 0);
7594}
7595
7596static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
7597{
7598 struct netdev_bpf xdp;
7599
7600 __dev_xdp_query(dev, bpf_op, &xdp);
58038695 7601
d67b9cd2
DB
7602 return xdp.prog_attached;
7603}
7604
f4e63525 7605static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
32d60277 7606 struct netlink_ext_ack *extack, u32 flags,
d67b9cd2
DB
7607 struct bpf_prog *prog)
7608{
f4e63525 7609 struct netdev_bpf xdp;
d67b9cd2
DB
7610
7611 memset(&xdp, 0, sizeof(xdp));
ee5d032f
JK
7612 if (flags & XDP_FLAGS_HW_MODE)
7613 xdp.command = XDP_SETUP_PROG_HW;
7614 else
7615 xdp.command = XDP_SETUP_PROG;
d67b9cd2 7616 xdp.extack = extack;
32d60277 7617 xdp.flags = flags;
d67b9cd2
DB
7618 xdp.prog = prog;
7619
f4e63525 7620 return bpf_op(dev, &xdp);
d67b9cd2
DB
7621}
7622
bd0b2e7f
JK
7623static void dev_xdp_uninstall(struct net_device *dev)
7624{
7625 struct netdev_bpf xdp;
7626 bpf_op_t ndo_bpf;
7627
7628 /* Remove generic XDP */
7629 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
7630
7631 /* Remove from the driver */
7632 ndo_bpf = dev->netdev_ops->ndo_bpf;
7633 if (!ndo_bpf)
7634 return;
7635
7636 __dev_xdp_query(dev, ndo_bpf, &xdp);
7637 if (xdp.prog_attached == XDP_ATTACHED_NONE)
7638 return;
7639
7640 /* Program removal should always succeed */
7641 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
7642}
7643
a7862b45
BB
7644/**
7645 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
7646 * @dev: device
b5d60989 7647 * @extack: netlink extended ack
a7862b45 7648 * @fd: new program fd or negative value to clear
85de8576 7649 * @flags: xdp-related flags
a7862b45
BB
7650 *
7651 * Set or clear a bpf program for a device
7652 */
ddf9f970
JK
7653int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7654 int fd, u32 flags)
a7862b45
BB
7655{
7656 const struct net_device_ops *ops = dev->netdev_ops;
7657 struct bpf_prog *prog = NULL;
f4e63525 7658 bpf_op_t bpf_op, bpf_chk;
a7862b45
BB
7659 int err;
7660
85de8576
DB
7661 ASSERT_RTNL();
7662
f4e63525
JK
7663 bpf_op = bpf_chk = ops->ndo_bpf;
7664 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
0489df9a 7665 return -EOPNOTSUPP;
f4e63525
JK
7666 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7667 bpf_op = generic_xdp_install;
7668 if (bpf_op == bpf_chk)
7669 bpf_chk = generic_xdp_install;
b5cdae32 7670
a7862b45 7671 if (fd >= 0) {
118b4aa2 7672 if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
d67b9cd2
DB
7673 return -EEXIST;
7674 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
118b4aa2 7675 __dev_xdp_attached(dev, bpf_op))
d67b9cd2 7676 return -EBUSY;
85de8576 7677
288b3de5
JK
7678 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7679 bpf_op == ops->ndo_bpf);
a7862b45
BB
7680 if (IS_ERR(prog))
7681 return PTR_ERR(prog);
441a3303
JK
7682
7683 if (!(flags & XDP_FLAGS_HW_MODE) &&
7684 bpf_prog_is_dev_bound(prog->aux)) {
7685 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7686 bpf_prog_put(prog);
7687 return -EINVAL;
7688 }
a7862b45
BB
7689 }
7690
f4e63525 7691 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
a7862b45
BB
7692 if (err < 0 && prog)
7693 bpf_prog_put(prog);
7694
7695 return err;
7696}
a7862b45 7697
1da177e4
LT
7698/**
7699 * dev_new_index - allocate an ifindex
c4ea43c5 7700 * @net: the applicable net namespace
1da177e4
LT
7701 *
7702 * Returns a suitable unique value for a new device interface
7703 * number. The caller must hold the rtnl semaphore or the
7704 * dev_base_lock to be sure it remains unique.
7705 */
881d966b 7706static int dev_new_index(struct net *net)
1da177e4 7707{
aa79e66e 7708 int ifindex = net->ifindex;
f4563a75 7709
1da177e4
LT
7710 for (;;) {
7711 if (++ifindex <= 0)
7712 ifindex = 1;
881d966b 7713 if (!__dev_get_by_index(net, ifindex))
aa79e66e 7714 return net->ifindex = ifindex;
1da177e4
LT
7715 }
7716}
7717
1da177e4 7718/* Delayed registration/unregisteration */
3b5b34fd 7719static LIST_HEAD(net_todo_list);
200b916f 7720DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 7721
6f05f629 7722static void net_set_todo(struct net_device *dev)
1da177e4 7723{
1da177e4 7724 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 7725 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
7726}
7727
9b5e383c 7728static void rollback_registered_many(struct list_head *head)
93ee31f1 7729{
e93737b0 7730 struct net_device *dev, *tmp;
5cde2829 7731 LIST_HEAD(close_head);
9b5e383c 7732
93ee31f1
DL
7733 BUG_ON(dev_boot_phase);
7734 ASSERT_RTNL();
7735
e93737b0 7736 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 7737 /* Some devices call this without having been registered,
e93737b0
KK
 7738 * as part of initialization unwind. Remove those
 7739 * devices and proceed with the remaining ones.
9b5e383c
ED
7740 */
7741 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
7742 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7743 dev->name, dev);
93ee31f1 7744
9b5e383c 7745 WARN_ON(1);
e93737b0
KK
7746 list_del(&dev->unreg_list);
7747 continue;
9b5e383c 7748 }
449f4544 7749 dev->dismantle = true;
9b5e383c 7750 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 7751 }
93ee31f1 7752
44345724 7753 /* If device is running, close it first. */
5cde2829
EB
7754 list_for_each_entry(dev, head, unreg_list)
7755 list_add_tail(&dev->close_list, &close_head);
99c4a26a 7756 dev_close_many(&close_head, true);
93ee31f1 7757
44345724 7758 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
7759 /* And unlink it from device chain. */
7760 unlist_netdevice(dev);
93ee31f1 7761
9b5e383c
ED
7762 dev->reg_state = NETREG_UNREGISTERING;
7763 }
41852497 7764 flush_all_backlogs();
93ee31f1
DL
7765
7766 synchronize_net();
7767
9b5e383c 7768 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
7769 struct sk_buff *skb = NULL;
7770
9b5e383c
ED
7771 /* Shutdown queueing discipline. */
7772 dev_shutdown(dev);
93ee31f1 7773
bd0b2e7f 7774 dev_xdp_uninstall(dev);
93ee31f1 7775
9b5e383c 7776 /* Notify protocols, that we are about to destroy
eb13da1a 7777 * this device. They should clean all the things.
7778 */
9b5e383c 7779 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 7780
395eea6c
MB
7781 if (!dev->rtnl_link_ops ||
7782 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
3d3ea5af 7783 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
38e01b30 7784 GFP_KERNEL, NULL, 0);
395eea6c 7785
9b5e383c
ED
7786 /*
7787 * Flush the unicast and multicast chains
7788 */
a748ee24 7789 dev_uc_flush(dev);
22bedad3 7790 dev_mc_flush(dev);
93ee31f1 7791
9b5e383c
ED
7792 if (dev->netdev_ops->ndo_uninit)
7793 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 7794
395eea6c
MB
7795 if (skb)
7796 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 7797
9ff162a8
JP
7798 /* Notifier chain MUST detach us all upper devices. */
7799 WARN_ON(netdev_has_any_upper_dev(dev));
0f524a80 7800 WARN_ON(netdev_has_any_lower_dev(dev));
93ee31f1 7801
9b5e383c
ED
7802 /* Remove entries from kobject tree */
7803 netdev_unregister_kobject(dev);
024e9679
AD
7804#ifdef CONFIG_XPS
7805 /* Remove XPS queueing entries */
7806 netif_reset_xps_queues_gt(dev, 0);
7807#endif
9b5e383c 7808 }
93ee31f1 7809
850a545b 7810 synchronize_net();
395264d5 7811
a5ee1551 7812 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
7813 dev_put(dev);
7814}
7815
7816static void rollback_registered(struct net_device *dev)
7817{
7818 LIST_HEAD(single);
7819
7820 list_add(&dev->unreg_list, &single);
7821 rollback_registered_many(&single);
ceaaec98 7822 list_del(&single);
93ee31f1
DL
7823}
7824
fd867d51
JW
7825static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7826 struct net_device *upper, netdev_features_t features)
7827{
7828 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7829 netdev_features_t feature;
5ba3f7d6 7830 int feature_bit;
fd867d51 7831
5ba3f7d6
JW
7832 for_each_netdev_feature(&upper_disables, feature_bit) {
7833 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7834 if (!(upper->wanted_features & feature)
7835 && (features & feature)) {
7836 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7837 &feature, upper->name);
7838 features &= ~feature;
7839 }
7840 }
7841
7842 return features;
7843}
7844
7845static void netdev_sync_lower_features(struct net_device *upper,
7846 struct net_device *lower, netdev_features_t features)
7847{
7848 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7849 netdev_features_t feature;
5ba3f7d6 7850 int feature_bit;
fd867d51 7851
5ba3f7d6
JW
7852 for_each_netdev_feature(&upper_disables, feature_bit) {
7853 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
7854 if (!(features & feature) && (lower->features & feature)) {
7855 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7856 &feature, lower->name);
7857 lower->wanted_features &= ~feature;
7858 netdev_update_features(lower);
7859
7860 if (unlikely(lower->features & feature))
7861 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7862 &feature, lower->name);
7863 }
7864 }
7865}
7866
c8f44aff
MM
7867static netdev_features_t netdev_fix_features(struct net_device *dev,
7868 netdev_features_t features)
b63365a2 7869{
57422dc5
MM
7870 /* Fix illegal checksum combinations */
7871 if ((features & NETIF_F_HW_CSUM) &&
7872 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 7873 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
7874 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7875 }
7876
b63365a2 7877 /* TSO requires that SG is present as well. */
ea2d3688 7878 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 7879 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 7880 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
7881 }
7882
ec5f0615
PS
7883 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7884 !(features & NETIF_F_IP_CSUM)) {
7885 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7886 features &= ~NETIF_F_TSO;
7887 features &= ~NETIF_F_TSO_ECN;
7888 }
7889
7890 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7891 !(features & NETIF_F_IPV6_CSUM)) {
7892 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7893 features &= ~NETIF_F_TSO6;
7894 }
7895
b1dc497b
AD
7896 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7897 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7898 features &= ~NETIF_F_TSO_MANGLEID;
7899
31d8b9e0
BH
7900 /* TSO ECN requires that TSO is present as well. */
7901 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7902 features &= ~NETIF_F_TSO_ECN;
7903
212b573f
MM
7904 /* Software GSO depends on SG. */
7905 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 7906 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
7907 features &= ~NETIF_F_GSO;
7908 }
7909
802ab55a
AD
7910 /* GSO partial features require GSO partial be set */
7911 if ((features & dev->gso_partial_features) &&
7912 !(features & NETIF_F_GSO_PARTIAL)) {
7913 netdev_dbg(dev,
7914 "Dropping partially supported GSO features since no GSO partial.\n");
7915 features &= ~dev->gso_partial_features;
7916 }
7917
fb1f5f79
MC
7918 if (!(features & NETIF_F_RXCSUM)) {
7919 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
7920 * successfully merged by hardware must also have the
7921 * checksum verified by hardware. If the user does not
7922 * want to enable RXCSUM, logically, we should disable GRO_HW.
7923 */
7924 if (features & NETIF_F_GRO_HW) {
7925 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
7926 features &= ~NETIF_F_GRO_HW;
7927 }
7928 }
7929
de8d5ab2
GP
7930 /* LRO/HW-GRO features cannot be combined with RX-FCS */
7931 if (features & NETIF_F_RXFCS) {
7932 if (features & NETIF_F_LRO) {
7933 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
7934 features &= ~NETIF_F_LRO;
7935 }
7936
7937 if (features & NETIF_F_GRO_HW) {
7938 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
7939 features &= ~NETIF_F_GRO_HW;
7940 }
e6c6a929
GP
7941 }
7942
b63365a2
HX
7943 return features;
7944}
b63365a2 7945
6cb6a27c 7946int __netdev_update_features(struct net_device *dev)
5455c699 7947{
fd867d51 7948 struct net_device *upper, *lower;
c8f44aff 7949 netdev_features_t features;
fd867d51 7950 struct list_head *iter;
e7868a85 7951 int err = -1;
5455c699 7952
87267485
MM
7953 ASSERT_RTNL();
7954
5455c699
MM
7955 features = netdev_get_wanted_features(dev);
7956
7957 if (dev->netdev_ops->ndo_fix_features)
7958 features = dev->netdev_ops->ndo_fix_features(dev, features);
7959
7960 /* driver might be less strict about feature dependencies */
7961 features = netdev_fix_features(dev, features);
7962
fd867d51
JW
 7963 /* some features can't be enabled if they're off on an upper device */
7964 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7965 features = netdev_sync_upper_features(dev, upper, features);
7966
5455c699 7967 if (dev->features == features)
e7868a85 7968 goto sync_lower;
5455c699 7969
c8f44aff
MM
7970 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7971 &dev->features, &features);
5455c699
MM
7972
7973 if (dev->netdev_ops->ndo_set_features)
7974 err = dev->netdev_ops->ndo_set_features(dev, features);
5f8dc33e
NA
7975 else
7976 err = 0;
5455c699 7977
6cb6a27c 7978 if (unlikely(err < 0)) {
5455c699 7979 netdev_err(dev,
c8f44aff
MM
7980 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7981 err, &features, &dev->features);
17b85d29
NA
7982 /* return non-0 since some features might have changed and
7983 * it's better to fire a spurious notification than miss it
7984 */
7985 return -1;
6cb6a27c
MM
7986 }
7987
e7868a85 7988sync_lower:
fd867d51
JW
7989 /* some features must be disabled on lower devices when disabled
7990 * on an upper device (think: bonding master or bridge)
7991 */
7992 netdev_for_each_lower_dev(dev, lower, iter)
7993 netdev_sync_lower_features(dev, lower, features);
7994
ae847f40
SD
7995 if (!err) {
7996 netdev_features_t diff = features ^ dev->features;
7997
7998 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
7999 /* udp_tunnel_{get,drop}_rx_info both need
8000 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
8001 * device, or they won't do anything.
8002 * Thus we need to update dev->features
8003 * *before* calling udp_tunnel_get_rx_info,
8004 * but *after* calling udp_tunnel_drop_rx_info.
8005 */
8006 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
8007 dev->features = features;
8008 udp_tunnel_get_rx_info(dev);
8009 } else {
8010 udp_tunnel_drop_rx_info(dev);
8011 }
8012 }
8013
9daae9bd
GP
8014 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
8015 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
8016 dev->features = features;
8017 err |= vlan_get_rx_ctag_filter_info(dev);
8018 } else {
8019 vlan_drop_rx_ctag_filter_info(dev);
8020 }
8021 }
8022
8023 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
8024 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
8025 dev->features = features;
8026 err |= vlan_get_rx_stag_filter_info(dev);
8027 } else {
8028 vlan_drop_rx_stag_filter_info(dev);
8029 }
8030 }
8031
6cb6a27c 8032 dev->features = features;
ae847f40 8033 }
6cb6a27c 8034
e7868a85 8035 return err < 0 ? 0 : 1;
6cb6a27c
MM
8036}
8037
afe12cc8
MM
8038/**
8039 * netdev_update_features - recalculate device features
8040 * @dev: the device to check
8041 *
8042 * Recalculate dev->features set and send notifications if it
8043 * has changed. Should be called after driver or hardware dependent
8044 * conditions might have changed that influence the features.
8045 */
6cb6a27c
MM
8046void netdev_update_features(struct net_device *dev)
8047{
8048 if (__netdev_update_features(dev))
8049 netdev_features_change(dev);
5455c699
MM
8050}
8051EXPORT_SYMBOL(netdev_update_features);
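/*
 * Hypothetical driver sketch (illustrative example_* names, not from this
 * file): a driver whose offload capability depends on runtime state re-runs
 * feature negotiation with netdev_update_features() under RTNL; its
 * ndo_fix_features() then masks out what the current configuration cannot
 * support.
 */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct example_priv *priv = netdev_priv(dev);	/* assumed priv layout */

	if (priv->small_buffers)		/* assumed driver state */
		features &= ~NETIF_F_LRO;
	return features;
}

static void example_buffer_size_changed(struct net_device *dev)
{
	rtnl_lock();
	netdev_update_features(dev);	/* re-evaluates wanted features */
	rtnl_unlock();
}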
8052
afe12cc8
MM
8053/**
8054 * netdev_change_features - recalculate device features
8055 * @dev: the device to check
8056 *
8057 * Recalculate dev->features set and send notifications even
8058 * if they have not changed. Should be called instead of
8059 * netdev_update_features() if also dev->vlan_features might
8060 * have changed to allow the changes to be propagated to stacked
8061 * VLAN devices.
8062 */
8063void netdev_change_features(struct net_device *dev)
8064{
8065 __netdev_update_features(dev);
8066 netdev_features_change(dev);
8067}
8068EXPORT_SYMBOL(netdev_change_features);
8069
fc4a7489
PM
8070/**
8071 * netif_stacked_transfer_operstate - transfer operstate
8072 * @rootdev: the root or lower level device to transfer state from
8073 * @dev: the device to transfer operstate to
8074 *
8075 * Transfer operational state from root to device. This is normally
8076 * called when a stacking relationship exists between the root
 8077 * device and the device (a leaf device).
8078 */
8079void netif_stacked_transfer_operstate(const struct net_device *rootdev,
8080 struct net_device *dev)
8081{
8082 if (rootdev->operstate == IF_OPER_DORMANT)
8083 netif_dormant_on(dev);
8084 else
8085 netif_dormant_off(dev);
8086
0575c86b
ZS
8087 if (netif_carrier_ok(rootdev))
8088 netif_carrier_on(dev);
8089 else
8090 netif_carrier_off(dev);
fc4a7489
PM
8091}
8092EXPORT_SYMBOL(netif_stacked_transfer_operstate);
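/*
 * Hypothetical sketch (illustrative example_* names, not from this file):
 * a stacked (vlan/macvlan-style) driver typically calls
 * netif_stacked_transfer_operstate() from its netdevice notifier when the
 * lower device changes, so carrier and dormant state propagate upward.
 */
static int example_notifier(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = example_find_upper(lower);	/* hypothetical */

	if (upper && (event == NETDEV_CHANGE || event == NETDEV_UP))
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}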
8093
1b4bf461
ED
8094static int netif_alloc_rx_queues(struct net_device *dev)
8095{
1b4bf461 8096 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 8097 struct netdev_rx_queue *rx;
10595902 8098 size_t sz = count * sizeof(*rx);
e817f856 8099 int err = 0;
1b4bf461 8100
bd25fa7b 8101 BUG_ON(count < 1);
1b4bf461 8102
dcda9b04 8103 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
8104 if (!rx)
8105 return -ENOMEM;
8106
bd25fa7b
TH
8107 dev->_rx = rx;
8108
e817f856 8109 for (i = 0; i < count; i++) {
fe822240 8110 rx[i].dev = dev;
e817f856
JDB
8111
8112 /* XDP RX-queue setup */
8113 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8114 if (err < 0)
8115 goto err_rxq_info;
8116 }
1b4bf461 8117 return 0;
e817f856
JDB
8118
8119err_rxq_info:
8120 /* Rollback successful reg's and free other resources */
8121 while (i--)
8122 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
141b52a9 8123 kvfree(dev->_rx);
e817f856
JDB
8124 dev->_rx = NULL;
8125 return err;
8126}
8127
8128static void netif_free_rx_queues(struct net_device *dev)
8129{
8130 unsigned int i, count = dev->num_rx_queues;
e817f856
JDB
8131
8132 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
8133 if (!dev->_rx)
8134 return;
8135
e817f856 8136 for (i = 0; i < count; i++)
82aaff2f
JK
8137 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8138
8139 kvfree(dev->_rx);
1b4bf461
ED
8140}
8141
aa942104
CG
8142static void netdev_init_one_queue(struct net_device *dev,
8143 struct netdev_queue *queue, void *_unused)
8144{
8145 /* Initialize queue lock */
8146 spin_lock_init(&queue->_xmit_lock);
8147 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8148 queue->xmit_lock_owner = -1;
b236da69 8149 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 8150 queue->dev = dev;
114cf580
TH
8151#ifdef CONFIG_BQL
8152 dql_init(&queue->dql, HZ);
8153#endif
aa942104
CG
8154}
8155
60877a32
ED
8156static void netif_free_tx_queues(struct net_device *dev)
8157{
4cb28970 8158 kvfree(dev->_tx);
60877a32
ED
8159}
8160
e6484930
TH
8161static int netif_alloc_netdev_queues(struct net_device *dev)
8162{
8163 unsigned int count = dev->num_tx_queues;
8164 struct netdev_queue *tx;
60877a32 8165 size_t sz = count * sizeof(*tx);
e6484930 8166
d339727c
ED
8167 if (count < 1 || count > 0xffff)
8168 return -EINVAL;
62b5942a 8169
dcda9b04 8170 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
da6bc57a
MH
8171 if (!tx)
8172 return -ENOMEM;
8173
e6484930 8174 dev->_tx = tx;
1d24eb48 8175
e6484930
TH
8176 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8177 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
8178
8179 return 0;
e6484930
TH
8180}
8181
a2029240
DV
8182void netif_tx_stop_all_queues(struct net_device *dev)
8183{
8184 unsigned int i;
8185
8186 for (i = 0; i < dev->num_tx_queues; i++) {
8187 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
f4563a75 8188
a2029240
DV
8189 netif_tx_stop_queue(txq);
8190 }
8191}
8192EXPORT_SYMBOL(netif_tx_stop_all_queues);
8193
1da177e4
LT
8194/**
8195 * register_netdevice - register a network device
8196 * @dev: device to register
8197 *
8198 * Take a completed network device structure and add it to the kernel
8199 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8200 * chain. 0 is returned on success. A negative errno code is returned
8201 * on a failure to set up the device, or if the name is a duplicate.
8202 *
8203 * Callers must hold the rtnl semaphore. You may want
8204 * register_netdev() instead of this.
8205 *
8206 * BUGS:
8207 * The locking appears insufficient to guarantee two parallel registers
8208 * will not get the same name.
8209 */
8210
8211int register_netdevice(struct net_device *dev)
8212{
1da177e4 8213 int ret;
d314774c 8214 struct net *net = dev_net(dev);
1da177e4 8215
e283de3a
FF
8216 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8217 NETDEV_FEATURE_COUNT);
1da177e4
LT
8218 BUG_ON(dev_boot_phase);
8219 ASSERT_RTNL();
8220
b17a7c17
SH
8221 might_sleep();
8222
1da177e4
LT
8223 /* When net_device's are persistent, this will be fatal. */
8224 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 8225 BUG_ON(!net);
1da177e4 8226
f1f28aa3 8227 spin_lock_init(&dev->addr_list_lock);
cf508b12 8228 netdev_set_addr_lockdep_class(dev);
1da177e4 8229
828de4f6 8230 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
8231 if (ret < 0)
8232 goto out;
8233
1da177e4 8234 /* Init, if this function is available */
d314774c
SH
8235 if (dev->netdev_ops->ndo_init) {
8236 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
8237 if (ret) {
8238 if (ret > 0)
8239 ret = -EIO;
90833aa4 8240 goto out;
1da177e4
LT
8241 }
8242 }
4ec93edb 8243
f646968f
PM
8244 if (((dev->hw_features | dev->features) &
8245 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
8246 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8247 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8248 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8249 ret = -EINVAL;
8250 goto err_uninit;
8251 }
8252
9c7dafbf
PE
8253 ret = -EBUSY;
8254 if (!dev->ifindex)
8255 dev->ifindex = dev_new_index(net);
8256 else if (__dev_get_by_index(net, dev->ifindex))
8257 goto err_uninit;
8258
5455c699
MM
8259 /* Transfer changeable features to wanted_features and enable
8260 * software offloads (GSO and GRO).
8261 */
8262 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f 8263 dev->features |= NETIF_F_SOFT_FEATURES;
d764a122
SD
8264
8265 if (dev->netdev_ops->ndo_udp_tunnel_add) {
8266 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8267 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8268 }
8269
14d1232f 8270 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 8271
cbc53e08 8272 if (!(dev->flags & IFF_LOOPBACK))
34324dc2 8273 dev->hw_features |= NETIF_F_NOCACHE_COPY;
cbc53e08 8274
7f348a60
AD
8275 /* If IPv4 TCP segmentation offload is supported we should also
8276 * allow the device to enable segmenting the frame with the option
8277 * of ignoring a static IP ID value. This doesn't enable the
8278 * feature itself but allows the user to enable it later.
8279 */
cbc53e08
AD
8280 if (dev->hw_features & NETIF_F_TSO)
8281 dev->hw_features |= NETIF_F_TSO_MANGLEID;
7f348a60
AD
8282 if (dev->vlan_features & NETIF_F_TSO)
8283 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8284 if (dev->mpls_features & NETIF_F_TSO)
8285 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8286 if (dev->hw_enc_features & NETIF_F_TSO)
8287 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
c6e1a0d1 8288
1180e7d6 8289 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 8290 */
1180e7d6 8291 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 8292
ee579677
PS
8293 /* Make NETIF_F_SG inheritable to tunnel devices.
8294 */
802ab55a 8295 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
ee579677 8296
0d89d203
SH
8297 /* Make NETIF_F_SG inheritable to MPLS.
8298 */
8299 dev->mpls_features |= NETIF_F_SG;
8300
7ffbe3fd
JB
8301 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8302 ret = notifier_to_errno(ret);
8303 if (ret)
8304 goto err_uninit;
8305
8b41d188 8306 ret = netdev_register_kobject(dev);
b17a7c17 8307 if (ret)
7ce1b0ed 8308 goto err_uninit;
b17a7c17
SH
8309 dev->reg_state = NETREG_REGISTERED;
8310
6cb6a27c 8311 __netdev_update_features(dev);
8e9b59b2 8312
1da177e4
LT
8313 /*
8314 * Default initial state at registry is that the
8315 * device is present.
8316 */
8317
8318 set_bit(__LINK_STATE_PRESENT, &dev->state);
8319
8f4cccbb
BH
8320 linkwatch_init_dev(dev);
8321
1da177e4 8322 dev_init_scheduler(dev);
1da177e4 8323 dev_hold(dev);
ce286d32 8324 list_netdevice(dev);
7bf23575 8325 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 8326
948b337e
JP
8327 /* If the device has permanent device address, driver should
8328 * set dev_addr and also addr_assign_type should be set to
8329 * NET_ADDR_PERM (default value).
8330 */
8331 if (dev->addr_assign_type == NET_ADDR_PERM)
8332 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8333
1da177e4 8334 /* Notify protocols, that a new device appeared. */
056925ab 8335 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 8336 ret = notifier_to_errno(ret);
93ee31f1
DL
8337 if (ret) {
8338 rollback_registered(dev);
8339 dev->reg_state = NETREG_UNREGISTERED;
8340 }
d90a909e
EB
8341 /*
8342 * Prevent userspace races by waiting until the network
8343 * device is fully setup before sending notifications.
8344 */
a2835763
PM
8345 if (!dev->rtnl_link_ops ||
8346 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 8347 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
8348
8349out:
8350 return ret;
7ce1b0ed
HX
8351
8352err_uninit:
d314774c
SH
8353 if (dev->netdev_ops->ndo_uninit)
8354 dev->netdev_ops->ndo_uninit(dev);
cf124db5
DM
8355 if (dev->priv_destructor)
8356 dev->priv_destructor(dev);
7ce1b0ed 8357 goto out;
1da177e4 8358}
d1b19dff 8359EXPORT_SYMBOL(register_netdevice);
1da177e4 8360
937f1ba5
BH
8361/**
8362 * init_dummy_netdev - init a dummy network device for NAPI
8363 * @dev: device to init
8364 *
 8365 * This takes a network device structure and initializes the minimum
 8366 * number of fields so it can be used to schedule NAPI polls without
8367 * registering a full blown interface. This is to be used by drivers
8368 * that need to tie several hardware interfaces to a single NAPI
8369 * poll scheduler due to HW limitations.
8370 */
8371int init_dummy_netdev(struct net_device *dev)
8372{
8373 /* Clear everything. Note we don't initialize spinlocks
 8374 * as they aren't supposed to be taken by any of the
8375 * NAPI code and this dummy netdev is supposed to be
8376 * only ever used for NAPI polls
8377 */
8378 memset(dev, 0, sizeof(struct net_device));
8379
8380 /* make sure we BUG if trying to hit standard
8381 * register/unregister code path
8382 */
8383 dev->reg_state = NETREG_DUMMY;
8384
937f1ba5
BH
8385 /* NAPI wants this */
8386 INIT_LIST_HEAD(&dev->napi_list);
8387
8388 /* a dummy interface is started by default */
8389 set_bit(__LINK_STATE_PRESENT, &dev->state);
8390 set_bit(__LINK_STATE_START, &dev->state);
8391
29b4433d
ED
 8392 /* Note: We don't allocate pcpu_refcnt for dummy devices,
 8393 * because users of this 'device' don't need to change
8394 * its refcount.
8395 */
8396
937f1ba5
BH
8397 return 0;
8398}
8399EXPORT_SYMBOL_GPL(init_dummy_netdev);
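/*
 * Hypothetical sketch (illustrative example_* names, not from this file):
 * a driver with several hardware channels but no corresponding net_device
 * can hang its NAPI context off a dummy netdev, as described above.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* process up to @budget packets here (omitted) */
	napi_complete_done(napi, 0);
	return 0;
}

static int example_init_napi(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&hw->napi);
	return 0;
}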
8400
8401
1da177e4
LT
8402/**
8403 * register_netdev - register a network device
8404 * @dev: device to register
8405 *
8406 * Take a completed network device structure and add it to the kernel
8407 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8408 * chain. 0 is returned on success. A negative errno code is returned
8409 * on a failure to set up the device, or if the name is a duplicate.
8410 *
38b4da38 8411 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
8412 * and expands the device name if you passed a format string to
8413 * alloc_netdev.
8414 */
8415int register_netdev(struct net_device *dev)
8416{
8417 int err;
8418
b0f3debc
KT
8419 if (rtnl_lock_killable())
8420 return -EINTR;
1da177e4 8421 err = register_netdevice(dev);
1da177e4
LT
8422 rtnl_unlock();
8423 return err;
8424}
8425EXPORT_SYMBOL(register_netdev);
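/*
 * Hypothetical probe sketch (illustrative example_* names, not from this
 * file): the usual registration sequence for an Ethernet driver.
 * alloc_etherdev() fills in sane defaults, register_netdev() takes RTNL
 * itself, and on failure the device must be released with free_netdev().
 */
static int example_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));	/* assumed priv */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;	/* assumed ops table */
	SET_NETDEV_DEV(dev, parent);
	eth_hw_addr_random(dev);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}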
8426
29b4433d
ED
8427int netdev_refcnt_read(const struct net_device *dev)
8428{
8429 int i, refcnt = 0;
8430
8431 for_each_possible_cpu(i)
8432 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8433 return refcnt;
8434}
8435EXPORT_SYMBOL(netdev_refcnt_read);
8436
2c53040f 8437/**
1da177e4 8438 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 8439 * @dev: target net_device
1da177e4
LT
8440 *
8441 * This is called when unregistering network devices.
8442 *
8443 * Any protocol or device that holds a reference should register
 8444 * for netdevice notification, and clean up and put back the
8445 * reference if they receive an UNREGISTER event.
8446 * We can get stuck here if buggy protocols don't correctly
4ec93edb 8447 * call dev_put.
1da177e4
LT
8448 */
8449static void netdev_wait_allrefs(struct net_device *dev)
8450{
8451 unsigned long rebroadcast_time, warning_time;
29b4433d 8452 int refcnt;
1da177e4 8453
e014debe
ED
8454 linkwatch_forget_dev(dev);
8455
1da177e4 8456 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
8457 refcnt = netdev_refcnt_read(dev);
8458
8459 while (refcnt != 0) {
1da177e4 8460 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 8461 rtnl_lock();
1da177e4
LT
8462
8463 /* Rebroadcast unregister notification */
056925ab 8464 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 8465
748e2d93 8466 __rtnl_unlock();
0115e8e3 8467 rcu_barrier();
748e2d93
ED
8468 rtnl_lock();
8469
1da177e4
LT
8470 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8471 &dev->state)) {
8472 /* We must not have linkwatch events
8473 * pending on unregister. If this
8474 * happens, we simply run the queue
8475 * unscheduled, resulting in a noop
8476 * for this device.
8477 */
8478 linkwatch_run_queue();
8479 }
8480
6756ae4b 8481 __rtnl_unlock();
1da177e4
LT
8482
8483 rebroadcast_time = jiffies;
8484 }
8485
8486 msleep(250);
8487
29b4433d
ED
8488 refcnt = netdev_refcnt_read(dev);
8489
1da177e4 8490 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
8491 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8492 dev->name, refcnt);
1da177e4
LT
8493 warning_time = jiffies;
8494 }
8495 }
8496}
8497
8498/* The sequence is:
8499 *
8500 * rtnl_lock();
8501 * ...
8502 * register_netdevice(x1);
8503 * register_netdevice(x2);
8504 * ...
8505 * unregister_netdevice(y1);
8506 * unregister_netdevice(y2);
8507 * ...
8508 * rtnl_unlock();
8509 * free_netdev(y1);
8510 * free_netdev(y2);
8511 *
58ec3b4d 8512 * We are invoked by rtnl_unlock().
1da177e4 8513 * This allows us to deal with problems:
b17a7c17 8514 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
8515 * without deadlocking with linkwatch via keventd.
8516 * 2) Since we run with the RTNL semaphore not held, we can sleep
8517 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
8518 *
8519 * We must not return until all unregister events added during
8520 * the interval the lock was held have been completed.
1da177e4 8521 */
1da177e4
LT
8522void netdev_run_todo(void)
8523{
626ab0e6 8524 struct list_head list;
1da177e4 8525
1da177e4 8526 /* Snapshot list, allow later requests */
626ab0e6 8527 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
8528
8529 __rtnl_unlock();
626ab0e6 8530
0115e8e3
ED
8531
8532 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
8533 if (!list_empty(&list))
8534 rcu_barrier();
8535
1da177e4
LT
8536 while (!list_empty(&list)) {
8537 struct net_device *dev
e5e26d75 8538 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
8539 list_del(&dev->todo_list);
8540
b17a7c17 8541 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 8542 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
8543 dev->name, dev->reg_state);
8544 dump_stack();
8545 continue;
8546 }
1da177e4 8547
b17a7c17 8548 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 8549
b17a7c17 8550 netdev_wait_allrefs(dev);
1da177e4 8551
b17a7c17 8552 /* paranoia */
29b4433d 8553 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
8554 BUG_ON(!list_empty(&dev->ptype_all));
8555 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
8556 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8557 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
330c7272 8558#if IS_ENABLED(CONFIG_DECNET)
547b792c 8559 WARN_ON(dev->dn_ptr);
330c7272 8560#endif
cf124db5
DM
8561 if (dev->priv_destructor)
8562 dev->priv_destructor(dev);
8563 if (dev->needs_free_netdev)
8564 free_netdev(dev);
9093bbb2 8565
50624c93
EB
8566 /* Report a network device has been unregistered */
8567 rtnl_lock();
8568 dev_net(dev)->dev_unreg_count--;
8569 __rtnl_unlock();
8570 wake_up(&netdev_unregistering_wq);
8571
9093bbb2
SH
8572 /* Free network device */
8573 kobject_put(&dev->dev.kobj);
1da177e4 8574 }
1da177e4
LT
8575}
8576
9256645a
JW
8577/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
8578 * all the same fields in the same order as net_device_stats, with only
8579 * the type differing, but rtnl_link_stats64 may have additional fields
8580 * at the end for newer counters.
3cfde79c 8581 */
77a1abf5
ED
8582void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
8583 const struct net_device_stats *netdev_stats)
3cfde79c
BH
8584{
8585#if BITS_PER_LONG == 64
9256645a 8586 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9af9959e 8587 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9256645a
JW
8588 /* zero out counters that only exist in rtnl_link_stats64 */
8589 memset((char *)stats64 + sizeof(*netdev_stats), 0,
8590 sizeof(*stats64) - sizeof(*netdev_stats));
3cfde79c 8591#else
9256645a 8592 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
3cfde79c
BH
8593 const unsigned long *src = (const unsigned long *)netdev_stats;
8594 u64 *dst = (u64 *)stats64;
8595
9256645a 8596 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
3cfde79c
BH
8597 for (i = 0; i < n; i++)
8598 dst[i] = src[i];
9256645a
JW
8599 /* zero out counters that only exist in rtnl_link_stats64 */
8600 memset((char *)stats64 + n * sizeof(u64), 0,
8601 sizeof(*stats64) - n * sizeof(u64));
3cfde79c
BH
8602#endif
8603}
77a1abf5 8604EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 8605
eeda3fd6
SH
8606/**
8607 * dev_get_stats - get network device statistics
8608 * @dev: device to get statistics from
28172739 8609 * @storage: place to store stats
eeda3fd6 8610 *
d7753516
BH
8611 * Get network statistics from device. Return @storage.
8612 * The device driver may provide its own method by setting
8613 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8614 * otherwise the internal statistics structure is used.
eeda3fd6 8615 */
d7753516
BH
8616struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8617 struct rtnl_link_stats64 *storage)
7004bf25 8618{
eeda3fd6
SH
8619 const struct net_device_ops *ops = dev->netdev_ops;
8620
28172739
ED
8621 if (ops->ndo_get_stats64) {
8622 memset(storage, 0, sizeof(*storage));
caf586e5
ED
8623 ops->ndo_get_stats64(dev, storage);
8624 } else if (ops->ndo_get_stats) {
3cfde79c 8625 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
8626 } else {
8627 netdev_stats_to_stats64(storage, &dev->stats);
28172739 8628 }
6f64ec74
ED
8629 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8630 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8631 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
28172739 8632 return storage;
c45d286e 8633}
eeda3fd6 8634EXPORT_SYMBOL(dev_get_stats);
c45d286e 8635
24824a09 8636struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 8637{
24824a09 8638 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 8639
24824a09
ED
8640#ifdef CONFIG_NET_CLS_ACT
8641 if (queue)
8642 return queue;
8643 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8644 if (!queue)
8645 return NULL;
8646 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 8647 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
8648 queue->qdisc_sleeping = &noop_qdisc;
8649 rcu_assign_pointer(dev->ingress_queue, queue);
8650#endif
8651 return queue;
bb949fbd
DM
8652}
8653
2c60db03
ED
8654static const struct ethtool_ops default_ethtool_ops;
8655
d07d7507
SG
8656void netdev_set_default_ethtool_ops(struct net_device *dev,
8657 const struct ethtool_ops *ops)
8658{
8659 if (dev->ethtool_ops == &default_ethtool_ops)
8660 dev->ethtool_ops = ops;
8661}
8662EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8663
74d332c1
ED
8664void netdev_freemem(struct net_device *dev)
8665{
8666 char *addr = (char *)dev - dev->padded;
8667
4cb28970 8668 kvfree(addr);
74d332c1
ED
8669}
8670
1da177e4 8671/**
722c9a0c 8672 * alloc_netdev_mqs - allocate network device
8673 * @sizeof_priv: size of private data to allocate space for
8674 * @name: device name format string
8675 * @name_assign_type: origin of device name
8676 * @setup: callback to initialize device
8677 * @txqs: the number of TX subqueues to allocate
8678 * @rxqs: the number of RX subqueues to allocate
8679 *
8680 * Allocates a struct net_device with private data area for driver use
8681 * and performs basic initialization. Also allocates subqueue structs
8682 * for each queue on the device.
1da177e4 8683 */
36909ea4 8684struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 8685 unsigned char name_assign_type,
36909ea4
TH
8686 void (*setup)(struct net_device *),
8687 unsigned int txqs, unsigned int rxqs)
1da177e4 8688{
1da177e4 8689 struct net_device *dev;
52a59bd5 8690 unsigned int alloc_size;
1ce8e7b5 8691 struct net_device *p;
1da177e4 8692
b6fe17d6
SH
8693 BUG_ON(strlen(name) >= sizeof(dev->name));
8694
36909ea4 8695 if (txqs < 1) {
7b6cd1ce 8696 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
8697 return NULL;
8698 }
8699
36909ea4 8700 if (rxqs < 1) {
7b6cd1ce 8701 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
8702 return NULL;
8703 }
36909ea4 8704
fd2ea0a7 8705 alloc_size = sizeof(struct net_device);
d1643d24
AD
8706 if (sizeof_priv) {
8707 /* ensure 32-byte alignment of private area */
1ce8e7b5 8708 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
8709 alloc_size += sizeof_priv;
8710 }
8711 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 8712 alloc_size += NETDEV_ALIGN - 1;
1da177e4 8713
dcda9b04 8714 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
62b5942a 8715 if (!p)
1da177e4 8716 return NULL;
1da177e4 8717
1ce8e7b5 8718 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 8719 dev->padded = (char *)dev - (char *)p;
ab9c73cc 8720
29b4433d
ED
8721 dev->pcpu_refcnt = alloc_percpu(int);
8722 if (!dev->pcpu_refcnt)
74d332c1 8723 goto free_dev;
ab9c73cc 8724
ab9c73cc 8725 if (dev_addr_init(dev))
29b4433d 8726 goto free_pcpu;
ab9c73cc 8727
22bedad3 8728 dev_mc_init(dev);
a748ee24 8729 dev_uc_init(dev);
ccffad25 8730
c346dca1 8731 dev_net_set(dev, &init_net);
1da177e4 8732
8d3bdbd5 8733 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 8734 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 8735
8d3bdbd5
DM
8736 INIT_LIST_HEAD(&dev->napi_list);
8737 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 8738 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 8739 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
8740 INIT_LIST_HEAD(&dev->adj_list.upper);
8741 INIT_LIST_HEAD(&dev->adj_list.lower);
7866a621
SN
8742 INIT_LIST_HEAD(&dev->ptype_all);
8743 INIT_LIST_HEAD(&dev->ptype_specific);
59cc1f61
JK
8744#ifdef CONFIG_NET_SCHED
8745 hash_init(dev->qdisc_hash);
8746#endif
02875878 8747 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
8748 setup(dev);
8749
a813104d 8750 if (!dev->tx_queue_len) {
f84bb1ea 8751 dev->priv_flags |= IFF_NO_QUEUE;
11597084 8752 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
a813104d 8753 }
906470c1 8754
36909ea4
TH
8755 dev->num_tx_queues = txqs;
8756 dev->real_num_tx_queues = txqs;
ed9af2e8 8757 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 8758 goto free_all;
e8a0464c 8759
36909ea4
TH
8760 dev->num_rx_queues = rxqs;
8761 dev->real_num_rx_queues = rxqs;
fe822240 8762 if (netif_alloc_rx_queues(dev))
8d3bdbd5 8763 goto free_all;
0a9627f2 8764
1da177e4 8765 strcpy(dev->name, name);
c835a677 8766 dev->name_assign_type = name_assign_type;
cbda10fa 8767 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
8768 if (!dev->ethtool_ops)
8769 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
8770
8771 nf_hook_ingress_init(dev);
8772
1da177e4 8773 return dev;
ab9c73cc 8774
8d3bdbd5
DM
8775free_all:
8776 free_netdev(dev);
8777 return NULL;
8778
29b4433d
ED
8779free_pcpu:
8780 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
8781free_dev:
8782 netdev_freemem(dev);
ab9c73cc 8783 return NULL;
1da177e4 8784}
36909ea4 8785EXPORT_SYMBOL(alloc_netdev_mqs);
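/*
 * Illustrative sketch, not part of dev.c: typical driver-side use of
 * alloc_netdev_mqs() for an Ethernet-like device with four TX and four
 * RX queues and a private area.  The private struct, setup callback,
 * name template and queue counts are assumptions for the example;
 * ether_setup() and NET_NAME_UNKNOWN are the existing kernel helpers.
 */
struct example_priv {
        int dummy;                      /* driver-private state would live here */
};

static void example_setup(struct net_device *dev)
{
        ether_setup(dev);               /* Ethernet defaults: type, MTU, etc. */
}

static struct net_device *example_alloc(void)
{
        return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
                                NET_NAME_UNKNOWN, example_setup, 4, 4);
}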
1da177e4
LT
8786
8787/**
722c9a0c 8788 * free_netdev - free network device
8789 * @dev: device
1da177e4 8790 *
722c9a0c 8791 * This function does the last stage of destroying an allocated device
8792 * interface. The reference to the device object is released. If this
8793 * is the last reference then it will be freed. Must be called in process
8794 * context.
1da177e4
LT
8795 */
8796void free_netdev(struct net_device *dev)
8797{
d565b0a1
HX
8798 struct napi_struct *p, *n;
8799
93d05d4a 8800 might_sleep();
60877a32 8801 netif_free_tx_queues(dev);
e817f856 8802 netif_free_rx_queues(dev);
e8a0464c 8803
33d480ce 8804 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 8805
f001fde5
JP
8806 /* Flush device addresses */
8807 dev_addr_flush(dev);
8808
d565b0a1
HX
8809 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8810 netif_napi_del(p);
8811
29b4433d
ED
8812 free_percpu(dev->pcpu_refcnt);
8813 dev->pcpu_refcnt = NULL;
8814
3041a069 8815 /* Compatibility with error handling in drivers */
1da177e4 8816 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 8817 netdev_freemem(dev);
1da177e4
LT
8818 return;
8819 }
8820
8821 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8822 dev->reg_state = NETREG_RELEASED;
8823
43cb76d9
GKH
8824 /* will free via device release */
8825 put_device(&dev->dev);
1da177e4 8826}
d1b19dff 8827EXPORT_SYMBOL(free_netdev);
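/*
 * Illustrative sketch, not part of dev.c: free_netdev() pairs with
 * alloc_netdev_mqs() on error paths before registration has succeeded,
 * and with unregister_netdev() on the normal teardown path.  This probe
 * function and the example_alloc() helper (sketched after
 * alloc_netdev_mqs() above) are assumptions for the example.
 */
static int example_probe(void)
{
        struct net_device *dev = example_alloc();
        int err;

        if (!dev)
                return -ENOMEM;

        err = register_netdev(dev);     /* takes and releases the rtnl lock */
        if (err) {
                free_netdev(dev);       /* registration never completed */
                return err;
        }
        return 0;
}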
4ec93edb 8828
f0db275a
SH
8829/**
8830 * synchronize_net - Synchronize with packet receive processing
8831 *
8832 * Wait for packets currently being received to be done.
8833 * Does not block later packets from starting.
8834 */
4ec93edb 8835void synchronize_net(void)
1da177e4
LT
8836{
8837 might_sleep();
be3fc413
ED
8838 if (rtnl_is_locked())
8839 synchronize_rcu_expedited();
8840 else
8841 synchronize_rcu();
1da177e4 8842}
d1b19dff 8843EXPORT_SYMBOL(synchronize_net);
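/*
 * Illustrative sketch, not part of dev.c: the usual pattern around
 * synchronize_net() is "unhook, wait, free".  __dev_remove_pack() is the
 * existing API that removes a packet handler without waiting; the
 * surrounding teardown function is an assumption for the example.
 */
static void example_remove_handler(struct packet_type *pt)
{
        __dev_remove_pack(pt);  /* unlinked, but receivers may still be running */
        synchronize_net();      /* wait for in-flight receive processing */
        kfree(pt);              /* now nothing can still reference pt */
}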
1da177e4
LT
8844
8845/**
44a0873d 8846 * unregister_netdevice_queue - remove device from the kernel
1da177e4 8847 * @dev: device
44a0873d 8848 * @head: list
6ebfbc06 8849 *
1da177e4 8850 * This function shuts down a device interface and removes it
d59b54b1 8851 * from the kernel tables.
44a0873d 8852 * If @head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
8853 *
8854 * Callers must hold the rtnl semaphore. You may want
8855 * unregister_netdev() instead of this.
8856 */
8857
44a0873d 8858void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 8859{
a6620712
HX
8860 ASSERT_RTNL();
8861
44a0873d 8862 if (head) {
9fdce099 8863 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
8864 } else {
8865 rollback_registered(dev);
8866 /* Finish processing unregister after unlock */
8867 net_set_todo(dev);
8868 }
1da177e4 8869}
44a0873d 8870EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 8871
9b5e383c
ED
8872/**
8873 * unregister_netdevice_many - unregister many devices
8874 * @head: list of devices
87757a91
ED
8875 *
8876 * Note: As most callers use a stack allocated list_head,
8877 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
8878 */
8879void unregister_netdevice_many(struct list_head *head)
8880{
8881 struct net_device *dev;
8882
8883 if (!list_empty(head)) {
8884 rollback_registered_many(head);
8885 list_for_each_entry(dev, head, unreg_list)
8886 net_set_todo(dev);
87757a91 8887 list_del(head);
9b5e383c
ED
8888 }
8889}
63c8099d 8890EXPORT_SYMBOL(unregister_netdevice_many);
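/*
 * Illustrative sketch, not part of dev.c: queueing several devices with
 * unregister_netdevice_queue() on a stack-allocated list and tearing
 * them all down with one unregister_netdevice_many() call, mirroring
 * default_device_exit_batch() later in this file.  The match callback
 * is an assumption for the example.
 */
static void example_remove_matching(struct net *net,
                                    bool (*match)(const struct net_device *dev))
{
        struct net_device *dev, *aux;
        LIST_HEAD(kill_list);

        rtnl_lock();
        for_each_netdev_safe(net, dev, aux)
                if (match(dev))
                        unregister_netdevice_queue(dev, &kill_list);
        unregister_netdevice_many(&kill_list);  /* also list_del()s kill_list */
        rtnl_unlock();
}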
9b5e383c 8891
1da177e4
LT
8892/**
8893 * unregister_netdev - remove device from the kernel
8894 * @dev: device
8895 *
8896 * This function shuts down a device interface and removes it
d59b54b1 8897 * from the kernel tables.
1da177e4
LT
8898 *
8899 * This is just a wrapper for unregister_netdevice that takes
8900 * the rtnl semaphore. In general you want to use this and not
8901 * unregister_netdevice.
8902 */
8903void unregister_netdev(struct net_device *dev)
8904{
8905 rtnl_lock();
8906 unregister_netdevice(dev);
8907 rtnl_unlock();
8908}
1da177e4
LT
8909EXPORT_SYMBOL(unregister_netdev);
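/*
 * Illustrative sketch, not part of dev.c: the common module-exit path
 * pairs unregister_netdev(), which takes the rtnl lock itself, with
 * free_netdev().  The module-global pointer is an assumption for the
 * example.
 */
static struct net_device *example_dev;  /* hypothetical module-global */

static void example_remove(void)
{
        unregister_netdev(example_dev); /* shut down and unlink the device */
        free_netdev(example_dev);       /* drop the final reference */
        example_dev = NULL;
}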
8910
ce286d32
EB
8911/**
8912 * dev_change_net_namespace - move device to different network namespace
8913 * @dev: device
8914 * @net: network namespace
8915 * @pat: If not NULL, name pattern to try if the current device name
8916 * is already taken in the destination network namespace.
8917 *
8918 * This function shuts down a device interface and moves it
8919 * to a new network namespace. On success 0 is returned, on
8920 * a failure a negative errno code is returned.
8921 *
8922 * Callers must hold the rtnl semaphore.
8923 */
8924
8925int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8926{
38e01b30 8927 int err, new_nsid, new_ifindex;
ce286d32
EB
8928
8929 ASSERT_RTNL();
8930
8931 /* Don't allow namespace local devices to be moved. */
8932 err = -EINVAL;
8933 if (dev->features & NETIF_F_NETNS_LOCAL)
8934 goto out;
8935
8936 /* Ensure the device has been registered */
ce286d32
EB
8937 if (dev->reg_state != NETREG_REGISTERED)
8938 goto out;
8939
8940 /* Get out if there is nothing to do */
8941 err = 0;
878628fb 8942 if (net_eq(dev_net(dev), net))
ce286d32
EB
8943 goto out;
8944
8945 /* Pick the destination device name, and ensure
8946 * we can use it in the destination network namespace.
8947 */
8948 err = -EEXIST;
d9031024 8949 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
8950 /* We get here if we can't use the current device name */
8951 if (!pat)
8952 goto out;
7892bd08
LR
8953 err = dev_get_valid_name(net, dev, pat);
8954 if (err < 0)
ce286d32
EB
8955 goto out;
8956 }
8957
8958 /*
8959 * And now a mini version of register_netdevice and unregister_netdevice.
8960 */
8961
8962 /* If device is running close it first. */
9b772652 8963 dev_close(dev);
ce286d32
EB
8964
8965 /* And unlink it from device chain */
ce286d32
EB
8966 unlist_netdevice(dev);
8967
8968 synchronize_net();
8969
8970 /* Shutdown queueing discipline. */
8971 dev_shutdown(dev);
8972
8973 /* Notify protocols that we are about to destroy
eb13da1a 8974 * this device. They should clean all the things.
8975 *
8976 * Note that dev->reg_state stays at NETREG_REGISTERED.
8977 * This is wanted because this way 8021q and macvlan know
8978 * the device is just moving and can keep their slaves up.
8979 */
ce286d32 8980 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43 8981 rcu_barrier();
38e01b30 8982
c36ac8e2 8983 new_nsid = peernet2id_alloc(dev_net(dev), net);
38e01b30
ND
8984 /* If there is an ifindex conflict assign a new one */
8985 if (__dev_get_by_index(net, dev->ifindex))
8986 new_ifindex = dev_new_index(net);
8987 else
8988 new_ifindex = dev->ifindex;
8989
8990 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
8991 new_ifindex);
ce286d32
EB
8992
8993 /*
8994 * Flush the unicast and multicast chains
8995 */
a748ee24 8996 dev_uc_flush(dev);
22bedad3 8997 dev_mc_flush(dev);
ce286d32 8998
4e66ae2e
SH
8999 /* Send a netdev-removed uevent to the old namespace */
9000 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 9001 netdev_adjacent_del_links(dev);
4e66ae2e 9002
ce286d32 9003 /* Actually switch the network namespace */
c346dca1 9004 dev_net_set(dev, net);
38e01b30 9005 dev->ifindex = new_ifindex;
ce286d32 9006
4e66ae2e
SH
9007 /* Send a netdev-add uevent to the new namespace */
9008 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 9009 netdev_adjacent_add_links(dev);
4e66ae2e 9010
8b41d188 9011 /* Fixup kobjects */
a1b3f594 9012 err = device_rename(&dev->dev, dev->name);
8b41d188 9013 WARN_ON(err);
ce286d32
EB
9014
9015 /* Add the device back in the hashes */
9016 list_netdevice(dev);
9017
9018 /* Notify protocols that a new device appeared. */
9019 call_netdevice_notifiers(NETDEV_REGISTER, dev);
9020
d90a909e
EB
9021 /*
9022 * Prevent userspace races by waiting until the network
9023 * device is fully setup before sending notifications.
9024 */
7f294054 9025 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 9026
ce286d32
EB
9027 synchronize_net();
9028 err = 0;
9029out:
9030 return err;
9031}
463d0183 9032EXPORT_SYMBOL_GPL(dev_change_net_namespace);
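/*
 * Illustrative sketch, not part of dev.c: moving a device into a
 * namespace obtained from a /proc/<pid>/ns/net file descriptor.
 * get_net_ns_by_fd(), put_net() and the rtnl locking requirement are
 * existing kernel APIs; the wrapper function and the "eth%d" fallback
 * pattern are assumptions for the example.
 */
static int example_move_to_ns(struct net_device *dev, int nsfd)
{
        struct net *net = get_net_ns_by_fd(nsfd);
        int err;

        if (IS_ERR(net))
                return PTR_ERR(net);

        rtnl_lock();
        /* "%d" in the pattern lets the core pick a free name on a clash */
        err = dev_change_net_namespace(dev, net, "eth%d");
        rtnl_unlock();

        put_net(net);
        return err;
}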
ce286d32 9033
f0bf90de 9034static int dev_cpu_dead(unsigned int oldcpu)
1da177e4
LT
9035{
9036 struct sk_buff **list_skb;
1da177e4 9037 struct sk_buff *skb;
f0bf90de 9038 unsigned int cpu;
97d8b6e3 9039 struct softnet_data *sd, *oldsd, *remsd = NULL;
1da177e4 9040
1da177e4
LT
9041 local_irq_disable();
9042 cpu = smp_processor_id();
9043 sd = &per_cpu(softnet_data, cpu);
9044 oldsd = &per_cpu(softnet_data, oldcpu);
9045
9046 /* Find end of our completion_queue. */
9047 list_skb = &sd->completion_queue;
9048 while (*list_skb)
9049 list_skb = &(*list_skb)->next;
9050 /* Append completion queue from offline CPU. */
9051 *list_skb = oldsd->completion_queue;
9052 oldsd->completion_queue = NULL;
9053
1da177e4 9054 /* Append output queue from offline CPU. */
a9cbd588
CG
9055 if (oldsd->output_queue) {
9056 *sd->output_queue_tailp = oldsd->output_queue;
9057 sd->output_queue_tailp = oldsd->output_queue_tailp;
9058 oldsd->output_queue = NULL;
9059 oldsd->output_queue_tailp = &oldsd->output_queue;
9060 }
ac64da0b
ED
9061 /* Append NAPI poll list from offline CPU, with one exception :
9062 * process_backlog() must be called by cpu owning percpu backlog.
9063 * We properly handle process_queue & input_pkt_queue later.
9064 */
9065 while (!list_empty(&oldsd->poll_list)) {
9066 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9067 struct napi_struct,
9068 poll_list);
9069
9070 list_del_init(&napi->poll_list);
9071 if (napi->poll == process_backlog)
9072 napi->state = 0;
9073 else
9074 ____napi_schedule(sd, napi);
264524d5 9075 }
1da177e4
LT
9076
9077 raise_softirq_irqoff(NET_TX_SOFTIRQ);
9078 local_irq_enable();
9079
773fc8f6 9080#ifdef CONFIG_RPS
9081 remsd = oldsd->rps_ipi_list;
9082 oldsd->rps_ipi_list = NULL;
9083#endif
9084 /* send out pending IPI's on offline CPU */
9085 net_rps_send_ipi(remsd);
9086
1da177e4 9087 /* Process offline CPU's input_pkt_queue */
76cc8b13 9088 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 9089 netif_rx_ni(skb);
76cc8b13 9090 input_queue_head_incr(oldsd);
fec5e652 9091 }
ac64da0b 9092 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 9093 netif_rx_ni(skb);
76cc8b13
TH
9094 input_queue_head_incr(oldsd);
9095 }
1da177e4 9096
f0bf90de 9097 return 0;
1da177e4 9098}
1da177e4 9099
7f353bf2 9100/**
b63365a2
HX
9101 * netdev_increment_features - increment feature set by one
9102 * @all: current feature set
9103 * @one: new feature set
9104 * @mask: mask feature set
7f353bf2
HX
9105 *
9106 * Computes a new feature set after adding a device with feature set
b63365a2
HX
9107 * @one to the master device with current feature set @all. Will not
9108 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 9109 */
c8f44aff
MM
9110netdev_features_t netdev_increment_features(netdev_features_t all,
9111 netdev_features_t one, netdev_features_t mask)
b63365a2 9112{
c8cd0989 9113 if (mask & NETIF_F_HW_CSUM)
a188222b 9114 mask |= NETIF_F_CSUM_MASK;
1742f183 9115 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 9116
a188222b 9117 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
1742f183 9118 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 9119
1742f183 9120 /* If one device supports hw checksumming, set for all. */
c8cd0989
TH
9121 if (all & NETIF_F_HW_CSUM)
9122 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
7f353bf2
HX
9123
9124 return all;
9125}
b63365a2 9126EXPORT_SYMBOL(netdev_increment_features);
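/*
 * Illustrative sketch, not part of dev.c: how an aggregating (master)
 * device might fold its lower devices' feature sets together with
 * netdev_increment_features(), in the spirit of the bridge and bonding
 * feature recomputation.  netdev_for_each_lower_dev() is the existing
 * iterator; the surrounding function and starting mask are assumptions
 * for the example.
 */
static netdev_features_t example_compute_features(struct net_device *master)
{
        netdev_features_t mask = master->features;
        netdev_features_t features = mask & ~NETIF_F_ONE_FOR_ALL;
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(master, lower, iter)
                features = netdev_increment_features(features,
                                                     lower->features, mask);
        return features;
}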
7f353bf2 9127
430f03cd 9128static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
9129{
9130 int i;
9131 struct hlist_head *hash;
9132
6da2ec56 9133 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
30d97d35
PE
9134 if (hash != NULL)
9135 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9136 INIT_HLIST_HEAD(&hash[i]);
9137
9138 return hash;
9139}
9140
881d966b 9141/* Initialize per network namespace state */
4665079c 9142static int __net_init netdev_init(struct net *net)
881d966b 9143{
734b6541
RM
9144 if (net != &init_net)
9145 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 9146
30d97d35
PE
9147 net->dev_name_head = netdev_create_hash();
9148 if (net->dev_name_head == NULL)
9149 goto err_name;
881d966b 9150
30d97d35
PE
9151 net->dev_index_head = netdev_create_hash();
9152 if (net->dev_index_head == NULL)
9153 goto err_idx;
881d966b
EB
9154
9155 return 0;
30d97d35
PE
9156
9157err_idx:
9158 kfree(net->dev_name_head);
9159err_name:
9160 return -ENOMEM;
881d966b
EB
9161}
9162
f0db275a
SH
9163/**
9164 * netdev_drivername - network driver for the device
9165 * @dev: network device
f0db275a
SH
9166 *
9167 * Determine network driver for device.
9168 */
3019de12 9169const char *netdev_drivername(const struct net_device *dev)
6579e57b 9170{
cf04a4c7
SH
9171 const struct device_driver *driver;
9172 const struct device *parent;
3019de12 9173 const char *empty = "";
6579e57b
AV
9174
9175 parent = dev->dev.parent;
6579e57b 9176 if (!parent)
3019de12 9177 return empty;
6579e57b
AV
9178
9179 driver = parent->driver;
9180 if (driver && driver->name)
3019de12
DM
9181 return driver->name;
9182 return empty;
6579e57b
AV
9183}
9184
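/*
 * Illustrative sketch, not part of dev.c: netdev_drivername() never
 * returns NULL, only a possibly empty string, so it can be used
 * directly in diagnostics.  The message and helper are assumptions for
 * the example.
 */
static void example_report_driver(struct net_device *dev)
{
        netdev_warn(dev, "driver %s reported an error\n",
                    netdev_drivername(dev));
}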
6ea754eb
JP
9185static void __netdev_printk(const char *level, const struct net_device *dev,
9186 struct va_format *vaf)
256df2f3 9187{
b004ff49 9188 if (dev && dev->dev.parent) {
6ea754eb
JP
9189 dev_printk_emit(level[1] - '0',
9190 dev->dev.parent,
9191 "%s %s %s%s: %pV",
9192 dev_driver_string(dev->dev.parent),
9193 dev_name(dev->dev.parent),
9194 netdev_name(dev), netdev_reg_state(dev),
9195 vaf);
b004ff49 9196 } else if (dev) {
6ea754eb
JP
9197 printk("%s%s%s: %pV",
9198 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 9199 } else {
6ea754eb 9200 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 9201 }
256df2f3
JP
9202}
9203
6ea754eb
JP
9204void netdev_printk(const char *level, const struct net_device *dev,
9205 const char *format, ...)
256df2f3
JP
9206{
9207 struct va_format vaf;
9208 va_list args;
256df2f3
JP
9209
9210 va_start(args, format);
9211
9212 vaf.fmt = format;
9213 vaf.va = &args;
9214
6ea754eb 9215 __netdev_printk(level, dev, &vaf);
b004ff49 9216
256df2f3 9217 va_end(args);
256df2f3
JP
9218}
9219EXPORT_SYMBOL(netdev_printk);
9220
9221#define define_netdev_printk_level(func, level) \
6ea754eb 9222void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 9223{ \
256df2f3
JP
9224 struct va_format vaf; \
9225 va_list args; \
9226 \
9227 va_start(args, fmt); \
9228 \
9229 vaf.fmt = fmt; \
9230 vaf.va = &args; \
9231 \
6ea754eb 9232 __netdev_printk(level, dev, &vaf); \
b004ff49 9233 \
256df2f3 9234 va_end(args); \
256df2f3
JP
9235} \
9236EXPORT_SYMBOL(func);
9237
9238define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9239define_netdev_printk_level(netdev_alert, KERN_ALERT);
9240define_netdev_printk_level(netdev_crit, KERN_CRIT);
9241define_netdev_printk_level(netdev_err, KERN_ERR);
9242define_netdev_printk_level(netdev_warn, KERN_WARNING);
9243define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9244define_netdev_printk_level(netdev_info, KERN_INFO);
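/*
 * Illustrative sketch, not part of dev.c: the per-level helpers
 * generated above are used like pr_*()/dev_*(), but prefix each message
 * with the driver, bus and interface names.  The link-change helper and
 * messages are assumptions for the example.
 */
static void example_link_change(struct net_device *dev, bool up)
{
        if (up)
                netdev_info(dev, "link is up\n");
        else
                netdev_notice(dev, "link is down\n");
}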
9245
4665079c 9246static void __net_exit netdev_exit(struct net *net)
881d966b
EB
9247{
9248 kfree(net->dev_name_head);
9249 kfree(net->dev_index_head);
ee21b18b
VA
9250 if (net != &init_net)
9251 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
881d966b
EB
9252}
9253
022cbae6 9254static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
9255 .init = netdev_init,
9256 .exit = netdev_exit,
9257};
9258
4665079c 9259static void __net_exit default_device_exit(struct net *net)
ce286d32 9260{
e008b5fc 9261 struct net_device *dev, *aux;
ce286d32 9262 /*
e008b5fc 9263 * Push all migratable network devices back to the
ce286d32
EB
9264 * initial network namespace
9265 */
9266 rtnl_lock();
e008b5fc 9267 for_each_netdev_safe(net, dev, aux) {
ce286d32 9268 int err;
aca51397 9269 char fb_name[IFNAMSIZ];
ce286d32
EB
9270
9271 /* Ignore unmovable devices (e.g. loopback) */
9272 if (dev->features & NETIF_F_NETNS_LOCAL)
9273 continue;
9274
e008b5fc
EB
9275 /* Leave virtual devices for the generic cleanup */
9276 if (dev->rtnl_link_ops)
9277 continue;
d0c082ce 9278
25985edc 9279 /* Push remaining network devices to init_net */
aca51397
PE
9280 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9281 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 9282 if (err) {
7b6cd1ce
JP
9283 pr_emerg("%s: failed to move %s to init_net: %d\n",
9284 __func__, dev->name, err);
aca51397 9285 BUG();
ce286d32
EB
9286 }
9287 }
9288 rtnl_unlock();
9289}
9290
50624c93
EB
9291static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9292{
9293 /* Return with the rtnl_lock held when there are no network
9294 * devices unregistering in any network namespace in net_list.
9295 */
9296 struct net *net;
9297 bool unregistering;
ff960a73 9298 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 9299
ff960a73 9300 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 9301 for (;;) {
50624c93
EB
9302 unregistering = false;
9303 rtnl_lock();
9304 list_for_each_entry(net, net_list, exit_list) {
9305 if (net->dev_unreg_count > 0) {
9306 unregistering = true;
9307 break;
9308 }
9309 }
9310 if (!unregistering)
9311 break;
9312 __rtnl_unlock();
ff960a73
PZ
9313
9314 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 9315 }
ff960a73 9316 remove_wait_queue(&netdev_unregistering_wq, &wait);
50624c93
EB
9317}
9318
04dc7f6b
EB
9319static void __net_exit default_device_exit_batch(struct list_head *net_list)
9320{
9321 /* At exit, all network devices must be removed from a network
b595076a 9322 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
9323 * Do this across as many network namespaces as possible to
9324 * improve batching efficiency.
9325 */
9326 struct net_device *dev;
9327 struct net *net;
9328 LIST_HEAD(dev_kill_list);
9329
50624c93
EB
9330 /* To prevent network device cleanup code from dereferencing
9331 * loopback devices or network devices that have been freed,
9332 * wait here for all pending unregistrations to complete
9333 * before unregistering the loopback device and allowing the
9334 * network namespace to be freed.
9335 *
9336 * The netdev todo list containing all network device
9337 * unregistrations that happen in default_device_exit_batch
9338 * will run in the rtnl_unlock() at the end of
9339 * default_device_exit_batch.
9340 */
9341 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
9342 list_for_each_entry(net, net_list, exit_list) {
9343 for_each_netdev_reverse(net, dev) {
b0ab2fab 9344 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
04dc7f6b
EB
9345 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9346 else
9347 unregister_netdevice_queue(dev, &dev_kill_list);
9348 }
9349 }
9350 unregister_netdevice_many(&dev_kill_list);
9351 rtnl_unlock();
9352}
9353
022cbae6 9354static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 9355 .exit = default_device_exit,
04dc7f6b 9356 .exit_batch = default_device_exit_batch,
ce286d32
EB
9357};
9358
1da177e4
LT
9359/*
9360 * Initialize the DEV module. At boot time this walks the device list and
9361 * unhooks any devices that fail to initialise (normally hardware not
9362 * present) and leaves us with a valid list of present and active devices.
9363 *
9364 */
9365
9366/*
9367 * This is called single threaded during boot, so no need
9368 * to take the rtnl semaphore.
9369 */
9370static int __init net_dev_init(void)
9371{
9372 int i, rc = -ENOMEM;
9373
9374 BUG_ON(!dev_boot_phase);
9375
1da177e4
LT
9376 if (dev_proc_init())
9377 goto out;
9378
8b41d188 9379 if (netdev_kobject_init())
1da177e4
LT
9380 goto out;
9381
9382 INIT_LIST_HEAD(&ptype_all);
82d8a867 9383 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
9384 INIT_LIST_HEAD(&ptype_base[i]);
9385
62532da9
VY
9386 INIT_LIST_HEAD(&offload_base);
9387
881d966b
EB
9388 if (register_pernet_subsys(&netdev_net_ops))
9389 goto out;
1da177e4
LT
9390
9391 /*
9392 * Initialise the packet receive queues.
9393 */
9394
6f912042 9395 for_each_possible_cpu(i) {
41852497 9396 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
e36fa2f7 9397 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 9398
41852497
ED
9399 INIT_WORK(flush, flush_backlog);
9400
e36fa2f7 9401 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 9402 skb_queue_head_init(&sd->process_queue);
f53c7239
SK
9403#ifdef CONFIG_XFRM_OFFLOAD
9404 skb_queue_head_init(&sd->xfrm_backlog);
9405#endif
e36fa2f7 9406 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 9407 sd->output_queue_tailp = &sd->output_queue;
df334545 9408#ifdef CONFIG_RPS
e36fa2f7
ED
9409 sd->csd.func = rps_trigger_softirq;
9410 sd->csd.info = sd;
e36fa2f7 9411 sd->cpu = i;
1e94d72f 9412#endif
0a9627f2 9413
e36fa2f7
ED
9414 sd->backlog.poll = process_backlog;
9415 sd->backlog.weight = weight_p;
1da177e4
LT
9416 }
9417
1da177e4
LT
9418 dev_boot_phase = 0;
9419
505d4f73
EB
9420 /* The loopback device is special: if any other network device
9421 * is present in a network namespace, the loopback device must
9422 * be present too. Since we now dynamically allocate and free the
9423 * loopback device, ensure this invariant is maintained by
9424 * keeping the loopback device as the first device on the
9425 * list of network devices, so that the loopback device
9426 * is the first device that appears and the last network device
9427 * that disappears.
9428 */
9429 if (register_pernet_device(&loopback_net_ops))
9430 goto out;
9431
9432 if (register_pernet_device(&default_device_ops))
9433 goto out;
9434
962cf36c
CM
9435 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
9436 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4 9437
f0bf90de
SAS
9438 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
9439 NULL, dev_cpu_dead);
9440 WARN_ON(rc < 0);
1da177e4
LT
9441 rc = 0;
9442out:
9443 return rc;
9444}
9445
9446subsys_initcall(net_dev_init);