// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell      :       SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
                                           struct net_device *dev,
                                           struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * For example usages, see register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
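
/*
 * Editorial sketch (not part of dev.c): a pure reader that walks the
 * device list under RCU, per the locking rules described above. The
 * function name is purely illustrative; only the locking pattern
 * matters.
 */
#if 0
static void example_count_running_devices(struct net *net)
{
        struct net_device *dev;
        int running = 0;

        rcu_read_lock();        /* pure reader: no dev_base_lock needed */
        for_each_netdev_rcu(net, dev) {
                if (dev->flags & IFF_UP)
                        running++;
        }
        rcu_read_unlock();

        pr_info("%d devices are up\n", running);
}
#endif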

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
                                                       const char *name)
{
        struct netdev_name_node *name_node;

        name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
        if (!name_node)
                return NULL;
        INIT_HLIST_NODE(&name_node->hlist);
        name_node->dev = dev;
        name_node->name = name;
        return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
        struct netdev_name_node *name_node;

        name_node = netdev_name_node_alloc(dev, dev->name);
        if (!name_node)
                return NULL;
        INIT_LIST_HEAD(&name_node->list);
        return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
        kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
                                 struct netdev_name_node *name_node)
{
        hlist_add_head_rcu(&name_node->hlist,
                           dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
        hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
                                                        const char *name)
{
        struct hlist_head *head = dev_name_hash(net, name);
        struct netdev_name_node *name_node;

        hlist_for_each_entry(name_node, head, hlist)
                if (!strcmp(name_node->name, name))
                        return name_node;
        return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
                                                            const char *name)
{
        struct hlist_head *head = dev_name_hash(net, name);
        struct netdev_name_node *name_node;

        hlist_for_each_entry_rcu(name_node, head, hlist)
                if (!strcmp(name_node->name, name))
                        return name_node;
        return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
        struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);

        name_node = netdev_name_node_lookup(net, name);
        if (name_node)
                return -EEXIST;
        name_node = netdev_name_node_alloc(dev, name);
        if (!name_node)
                return -ENOMEM;
        netdev_name_node_add(net, name_node);
        /* The node that holds dev->name acts as a head of per-device list. */
        list_add_tail(&name_node->list, &dev->name_node->list);

        return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
        list_del(&name_node->list);
        netdev_name_node_del(name_node);
        kfree(name_node->name);
        netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
        struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);

        name_node = netdev_name_node_lookup(net, name);
        if (!name_node)
                return -ENOENT;
        __netdev_name_node_alt_destroy(name_node);

        return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
        struct netdev_name_node *name_node, *tmp;

        list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
                __netdev_name_node_alt_destroy(name_node);
}
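
/*
 * Editorial sketch (not part of dev.c): alternative interface names are
 * normally added via rtnetlink (e.g. "ip link property add dev eth0
 * altname foo"), which ends up in netdev_name_node_alt_create(). A
 * hypothetical in-kernel caller would look like this; the name string
 * is kfree()d on destroy, hence the kstrdup(). Holds RTNL by
 * convention.
 */
#if 0
static int example_add_altname(struct net_device *dev)
{
        char *alt = kstrdup("uplink0", GFP_KERNEL); /* freed on destroy */

        if (!alt)
                return -ENOMEM;
        return netdev_name_node_alt_create(dev, alt);
}
#endif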

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        netdev_name_node_add(net, dev->name_node);
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        netdev_name_node_del(dev->name_node);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
        "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
        "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
        "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
        "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
        "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
        "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
        "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
        "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
        "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
        "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
        "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
        "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
        "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
        "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
        "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *              Protocol management and registration routines
 *
 *******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if protocol handler, mangling packet, will
 * be the first on list, it is not able to sense, that packet
 * is cloned and should be copied-on-write, so that it will
 * change it and subsequent readers will get broken packet.
 * --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return pt->dev ? &pt->dev->ptype_all : &ptype_all;
        else
                return pt->dev ? &pt->dev->ptype_specific :
                                 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
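
/*
 * Editorial sketch (not part of dev.c): a minimal tap that registers a
 * packet handler for every Ethernet protocol. The handler runs in
 * softirq context and owns the skb reference it is given, so it must
 * consume or free it. "example_rcv" and the module hooks are
 * illustrative only.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        pr_debug("saw %u bytes on %s\n", skb->len, dev->name);
        kfree_skb(skb);         /* we own this reference */
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL),         /* tap: all protocols */
        .func = example_rcv,
};

/* dev_add_pack(&example_pt) in module init,
 * dev_remove_pack(&example_pt) in module exit.
 */
#endif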

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
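
/*
 * Editorial sketch (not part of dev.c): offload handlers are keyed by
 * protocol and kept sorted by priority; dev_add_offload() above inserts
 * before the first entry with a larger priority value, so a lower value
 * means earlier in the list. The gro_receive/gro_complete callback
 * names below are placeholders, not real functions.
 */
#if 0
static struct packet_offload example_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .priority = 10,         /* lower value == earlier in the list */
        .callbacks = {
                .gro_receive = example_gro_receive,
                .gro_complete = example_gro_complete,
        },
};

/* dev_add_offload(&example_offload) at init,
 * dev_remove_offload(&example_offload) at teardown.
 */
#endif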

/******************************************************************************
 *
 *                   Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list. The function
 * returns 0 on error and 1 on success. This is a generic routine for
 * all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
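
/*
 * Editorial note (not part of dev.c): the handler above parses a boot
 * parameter of the form
 *
 *      netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>
 *
 * For example, "netdev=9,0x300,0,0,eth0" (values illustrative) stores
 * irq 9 and I/O base 0x300 for the device that will later register as
 * "eth0"; the driver then picks the values up via
 * netdev_boot_setup_check(). get_options() consumes the leading
 * integers and leaves str pointing at the device name.
 */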

/*******************************************************************************
 *
 *                          Device Interface Subroutines
 *
 *******************************************************************************/

/**
 * dev_get_iflink - get 'iflink' value of an interface
 * @dev: targeted interface
 *
 * Indicates the ifindex the interface is linked to.
 * Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
                return dev->netdev_ops->ndo_get_iflink(dev);

        return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 * dev_fill_metadata_dst - Retrieve tunnel egress information.
 * @dev: targeted interface
 * @skb: The packet.
 *
 * For better visibility of tunnel traffic OVS needs to retrieve
 * egress tunnel information for a packet. The following API allows
 * the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info;

        if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
                return -EINVAL;

        info = skb_tunnel_info_unclone(skb);
        if (!info)
                return -ENOMEM;
        if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
                return -EINVAL;

        return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under the RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct netdev_name_node *node_name;

        node_name = netdev_name_node_lookup(net, name);
        return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct netdev_name_node *node_name;

        node_name = netdev_name_node_lookup_rcu(net, name);
        return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
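
/*
 * Editorial sketch (not part of dev.c): typical refcounted lookup.
 * dev_get_by_name() takes a reference, so the device cannot be freed
 * while we use it, and dev_put() must balance the hold. The function
 * name is illustrative.
 */
#if 0
static int example_mtu_of(struct net *net, const char *name)
{
        struct net_device *dev = dev_get_by_name(net, name);
        int mtu;

        if (!dev)
                return -ENODEV;
        mtu = dev->mtu;
        dev_put(dev);           /* release the reference from the lookup */
        return mtu;
}
#endif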

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold either the RTNL semaphore
 * or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold the RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * dev_get_by_napi_id - find a device by napi_id
 * @napi_id: ID of the NAPI struct
 *
 * Search for an interface by NAPI ID. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not had
 * its reference counter increased so the caller must be careful
 * about locking. The caller must hold the RCU lock.
 */
struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
        struct napi_struct *napi;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (napi_id < MIN_NAPI_ID)
                return NULL;

        napi = napi_by_id(napi_id);

        return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
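
/*
 * Editorial sketch (not part of dev.c): lock-free lookup by ifindex.
 * The device pointer is only valid inside the RCU read-side section,
 * so copy out whatever is needed before unlocking. The function name
 * is illustrative.
 */
#if 0
static int example_name_of(struct net *net, int ifindex, char *buf)
{
        struct net_device *dev;
        int err = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev) {
                strscpy(buf, dev->name, IFNAMSIZ);
                err = 0;
        }
        rcu_read_unlock();
        return err;
}
#endif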

/**
 * netdev_get_name - get a netdevice name, knowing its ifindex.
 * @net: network namespace
 * @name: a pointer to the buffer where the name will be stored.
 * @ifindex: the ifindex of the interface to get the name from.
 *
 * The use of raw_seqcount_begin() and cond_resched() before
 * retrying is required as we want to give the writers a chance
 * to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}
/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns NULL if the device
 * is not found or a pointer to the device.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
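
/*
 * Editorial sketch (not part of dev.c): looking up a device by MAC.
 * The compared length is @dev->addr_len, so the buffer must be at
 * least that big; here we assume a 6-byte Ethernet address. The
 * function name and address value are illustrative.
 */
#if 0
static bool example_mac_in_use(struct net *net, const u8 *mac)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, (const char *)mac);
        rcu_read_unlock();
        return dev != NULL;     /* the pointer itself must not be used later */
}
#endif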

/**
 * __dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. Must be called inside
 * rtnl_lock(), and result refcount is unchanged.
 */
struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
                                      unsigned short mask)
{
        struct net_device *dev, *ret;

        ASSERT_RTNL();

        ret = NULL;
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work. We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);
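
/*
 * Editorial note (not part of dev.c): by the rules above, "eth0" and
 * "wan-uplink" are valid; "", ".", "..", "a/b", "a:b", "a b", and any
 * name of IFNAMSIZ (16) or more bytes are rejected. A quick self-check:
 */
#if 0
static void example_check_names(void)
{
        WARN_ON(!dev_valid_name("eth0"));
        WARN_ON(dev_valid_name("eth 0"));               /* whitespace */
        WARN_ON(dev_valid_name("eth/0"));               /* path separator */
        WARN_ON(dev_valid_name("0123456789abcdef"));    /* 16 chars: too long */
}
#endif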

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf: scratch buffer and result name string
 *
 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        if (!dev_valid_name(name))
                return -EINVAL;

        p = strchr(name, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user. There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not the exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        BUG_ON(!net);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
        return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

int dev_get_valid_name(struct net *net, struct net_device *dev,
                       const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
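
/*
 * Editorial sketch (not part of dev.c): how a driver typically uses the
 * "%d" pattern. With "eth0" and "eth2" taken, this assigns "eth1". The
 * surrounding registration code is elided; the caller holds RTNL.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "eth%d");

        if (unit < 0)
                return unit;    /* -EINVAL, -ENFILE, ... */
        pr_info("assigned %s (unit %d)\n", dev->name, unit);
        return 0;
}
#endif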

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change name of a device; can pass format strings "eth%d"
 * for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);

        /* Some auto-enslaved devices e.g. failover slaves are
         * special, as userspace might rename the device after
         * the interface had been brought up and running since
         * the point kernel initiated auto-enslavement. Allow
         * live name change even when these slave devices are
         * up and running.
         *
         * Typically, users of these auto-enslaving devices
         * don't actually care about slave name change, as
         * they are supposed to operate on master interface
         * directly.
         */
        if (dev->flags & IFF_UP &&
            likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

        if (oldname[0] && !strchr(oldname, '%'))
                netdev_info(dev, "renamed from %s\n", oldname);

        old_assign_type = dev->name_assign_type;
        dev->name_assign_type = NET_NAME_RENAMED;

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                dev->name_assign_type = old_assign_type;
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        netdev_adjacent_rename_links(dev, oldname);

        write_lock_bh(&dev_base_lock);
        netdev_name_node_del(dev->name_node);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        netdev_name_node_add(net, dev->name_node);
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        dev->name_assign_type = old_assign_type;
                        old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        struct dev_ifalias *new_alias = NULL;

        if (len >= IFALIASZ)
                return -EINVAL;

        if (len) {
                new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
                if (!new_alias)
                        return -ENOMEM;

                memcpy(new_alias->ifalias, alias, len);
                new_alias->ifalias[len] = 0;
        }

        mutex_lock(&ifalias_mutex);
        rcu_swap_protected(dev->ifalias, new_alias,
                           mutex_is_locked(&ifalias_mutex));
        mutex_unlock(&ifalias_mutex);

        if (new_alias)
                kfree_rcu(new_alias, rcuhead);

        return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 * dev_get_alias - get ifalias of a device
 * @dev: device
 * @name: buffer to store name of ifalias
 * @len: size of buffer
 *
 * Get the ifalias for a device. The caller must make sure dev cannot go
 * away, e.g. hold the RCU read lock or own a reference count to the device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
        const struct dev_ifalias *alias;
        int ret = 0;

        rcu_read_lock();
        alias = rcu_dereference(dev->ifalias);
        if (alias)
                ret = snprintf(name, len, "%s", alias->ifalias);
        rcu_read_unlock();

        return ret;
}
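
/*
 * Editorial sketch (not part of dev.c): the alias is a free-form
 * description, settable from userspace via "ip link set dev eth0
 * alias ...". An in-kernel caller would do the following; the name and
 * description are illustrative, and RTNL is held by convention.
 */
#if 0
static int example_describe(struct net_device *dev)
{
        static const char desc[] = "uplink to rack 3";

        return dev_set_alias(dev, desc, strlen(desc));
}
#endif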

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                struct netdev_notifier_change_info change_info = {
                        .info.dev = dev,
                };

                call_netdevice_notifiers_info(NETDEV_CHANGE,
                                              &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        if (!netif_device_present(dev))
                return -ENODEV;

        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
        netpoll_poll_disable(dev);

        ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        netpoll_poll_enable(dev);

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                dev->flags |= IFF_UP;
                dev_set_rx_mode(dev);
                dev_activate(dev);
                add_device_randomness(dev->dev_addr, dev->addr_len);
        }

        return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 * @extack: netlink extended ack
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
        int ret;

        if (dev->flags & IFF_UP)
                return 0;

        ret = __dev_open(dev, extack);
        if (ret < 0)
                return ret;

        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);
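
/*
 * Editorial sketch (not part of dev.c): bringing an interface up from
 * kernel code. dev_open() must run under RTNL; passing a NULL extack
 * simply forgoes extended error reporting. The function name is
 * illustrative.
 */
#if 0
static int example_bring_up(struct net *net, const char *name)
{
        struct net_device *dev;
        int err;

        rtnl_lock();
        dev = __dev_get_by_name(net, name); /* RTNL protects the pointer */
        err = dev ? dev_open(dev, NULL) : -ENODEV;
        rtnl_unlock();
        return err;
}
#endif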

static void __dev_close_many(struct list_head *head)
{
        struct net_device *dev;

        ASSERT_RTNL();
        might_sleep();

        list_for_each_entry(dev, head, close_list) {
                /* Temporarily disable netpoll until the interface is down */
                netpoll_poll_disable(dev);

                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

                clear_bit(__LINK_STATE_START, &dev->state);

                /* Synchronize to scheduled poll. We cannot touch poll list, it
                 * can be even on a different cpu. So just clear netif_running().
                 *
                 * dev->stop() will invoke napi_disable() on all of its
                 * napi_struct instances on this device.
                 */
                smp_mb__after_atomic(); /* Commit netif_running(). */
        }

        dev_deactivate_many(head);

        list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;

                /*
                 * Call the device specific close. This cannot fail.
                 * Only if device is UP
                 *
                 * We allow it to be called even after a DETACH hot-plug
                 * event.
                 */
                if (ops->ndo_stop)
                        ops->ndo_stop(dev);

                dev->flags &= ~IFF_UP;
                netpoll_poll_enable(dev);
        }
}

static void __dev_close(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        __dev_close_many(&single);
        list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
        struct net_device *dev, *tmp;

        /* Remove the devices that don't need to be closed */
        list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
                        list_del_init(&dev->close_list);

        __dev_close_many(head);

        list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
                if (unlink)
                        list_del_init(&dev->close_list);
        }
}
EXPORT_SYMBOL(dev_close_many);

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
void dev_close(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);

                list_add(&dev->close_list, &single);
                dev_close_many(&single, true);
                list_del(&single);
        }
}
EXPORT_SYMBOL(dev_close);
1da177e4
LT
1653
1654
0187bdfb
BH
1655/**
1656 * dev_disable_lro - disable Large Receive Offload on a device
1657 * @dev: device
1658 *
1659 * Disable Large Receive Offload (LRO) on a net device. Must be
1660 * called under RTNL. This is needed if received packets may be
1661 * forwarded to another interface.
1662 */
1663void dev_disable_lro(struct net_device *dev)
1664{
fbe168ba
MK
1665 struct net_device *lower_dev;
1666 struct list_head *iter;
529d0489 1667
bc5787c6
MM
1668 dev->wanted_features &= ~NETIF_F_LRO;
1669 netdev_update_features(dev);
27660515 1670
22d5969f
MM
1671 if (unlikely(dev->features & NETIF_F_LRO))
1672 netdev_WARN(dev, "failed to disable LRO!\n");
fbe168ba
MK
1673
1674 netdev_for_each_lower_dev(dev, lower_dev, iter)
1675 dev_disable_lro(lower_dev);
0187bdfb
BH
1676}
1677EXPORT_SYMBOL(dev_disable_lro);
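/*
 * Example: a hypothetical sketch of the forwarding-setup case described
 * above; the caller already holds RTNL when it decides received packets
 * may be forwarded:
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 */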
1678
56f5aa77
MC
1679/**
1680 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1681 * @dev: device
1682 *
1683 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1684 * called under RTNL. This is needed if Generic XDP is installed on
1685 * the device.
1686 */
1687static void dev_disable_gro_hw(struct net_device *dev)
1688{
1689 dev->wanted_features &= ~NETIF_F_GRO_HW;
1690 netdev_update_features(dev);
1691
1692 if (unlikely(dev->features & NETIF_F_GRO_HW))
1693 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1694}
1695
ede2762d
KT
1696const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1697{
1698#define N(val) \
1699 case NETDEV_##val: \
1700 return "NETDEV_" __stringify(val);
1701 switch (cmd) {
1702 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1703 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1704 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1705 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1706 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1707 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1708 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
9daae9bd
GP
1709 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1710 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1570415f 1711 N(PRE_CHANGEADDR)
3f5ecd8a 1712 }
ede2762d
KT
1713#undef N
1714 return "UNKNOWN_NETDEV_EVENT";
1715}
1716EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
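/*
 * Example: netdev_cmd_to_name() is convenient for tracing inside a
 * notifier callback. A hypothetical sketch (my_netdev_event() is not a
 * real function in this file):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		pr_debug("%s: %s\n", netdev_name(dev),
 *			 netdev_cmd_to_name(event));
 *		return NOTIFY_DONE;
 *	}
 */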
1717
351638e7
JP
1718static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1719 struct net_device *dev)
1720{
51d0c047
DA
1721 struct netdev_notifier_info info = {
1722 .dev = dev,
1723 };
351638e7 1724
351638e7
JP
1725 return nb->notifier_call(nb, val, &info);
1726}
0187bdfb 1727
881d966b
EB
1728static int dev_boot_phase = 1;
1729
1da177e4 1730/**
722c9a0c 1731 * register_netdevice_notifier - register a network notifier block
1732 * @nb: notifier
1da177e4 1733 *
722c9a0c 1734 * Register a notifier to be called when network device events occur.
1735 * The notifier passed is linked into the kernel structures and must
1736 * not be reused until it has been unregistered. A negative errno code
1737 * is returned on a failure.
1da177e4 1738 *
722c9a0c 1739 *	When registered, all registration and up events are replayed
 1740 *	to the new notifier to allow it to have a race-free
 1741 *	view of the network device list.
1da177e4
LT
1742 */
1743
1744int register_netdevice_notifier(struct notifier_block *nb)
1745{
1746 struct net_device *dev;
fcc5a03a 1747 struct net_device *last;
881d966b 1748 struct net *net;
1da177e4
LT
1749 int err;
1750
328fbe74
KT
1751 /* Close race with setup_net() and cleanup_net() */
1752 down_write(&pernet_ops_rwsem);
1da177e4 1753 rtnl_lock();
f07d5b94 1754 err = raw_notifier_chain_register(&netdev_chain, nb);
fcc5a03a
HX
1755 if (err)
1756 goto unlock;
881d966b
EB
1757 if (dev_boot_phase)
1758 goto unlock;
1759 for_each_net(net) {
1760 for_each_netdev(net, dev) {
351638e7 1761 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
881d966b
EB
1762 err = notifier_to_errno(err);
1763 if (err)
1764 goto rollback;
1765
1766 if (!(dev->flags & IFF_UP))
1767 continue;
1da177e4 1768
351638e7 1769 call_netdevice_notifier(nb, NETDEV_UP, dev);
881d966b 1770 }
1da177e4 1771 }
fcc5a03a
HX
1772
1773unlock:
1da177e4 1774 rtnl_unlock();
328fbe74 1775 up_write(&pernet_ops_rwsem);
1da177e4 1776 return err;
fcc5a03a
HX
1777
1778rollback:
1779 last = dev;
881d966b
EB
1780 for_each_net(net) {
1781 for_each_netdev(net, dev) {
1782 if (dev == last)
8f891489 1783 goto outroll;
fcc5a03a 1784
881d966b 1785 if (dev->flags & IFF_UP) {
351638e7
JP
1786 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1787 dev);
1788 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
881d966b 1789 }
351638e7 1790 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
fcc5a03a 1791 }
fcc5a03a 1792 }
c67625a1 1793
8f891489 1794outroll:
c67625a1 1795 raw_notifier_chain_unregister(&netdev_chain, nb);
fcc5a03a 1796 goto unlock;
1da177e4 1797}
d1b19dff 1798EXPORT_SYMBOL(register_netdevice_notifier);
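/*
 * Example: a minimal sketch of a hypothetical module hooking device
 * events. Because REGISTER/UP events are replayed as documented above,
 * the callback also sees every device that already existed at
 * registration time:
 *
 *	static int my_event(struct notifier_block *nb,
 *			    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", netdev_name(dev));
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_event };
 *
 *	err = register_netdevice_notifier(&my_nb);
 */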
1da177e4
LT
1799
1800/**
722c9a0c 1801 * unregister_netdevice_notifier - unregister a network notifier block
1802 * @nb: notifier
1da177e4 1803 *
722c9a0c 1804 * Unregister a notifier previously registered by
 1805 *	register_netdevice_notifier(). The notifier is unlinked from the
1806 * kernel structures and may then be reused. A negative errno code
1807 * is returned on a failure.
7d3d43da 1808 *
722c9a0c 1809 *	After unregistering, unregister and down device events are synthesized
1810 * for all devices on the device list to the removed notifier to remove
1811 * the need for special case cleanup code.
1da177e4
LT
1812 */
1813
1814int unregister_netdevice_notifier(struct notifier_block *nb)
1815{
7d3d43da
EB
1816 struct net_device *dev;
1817 struct net *net;
9f514950
HX
1818 int err;
1819
328fbe74
KT
1820 /* Close race with setup_net() and cleanup_net() */
1821 down_write(&pernet_ops_rwsem);
9f514950 1822 rtnl_lock();
f07d5b94 1823 err = raw_notifier_chain_unregister(&netdev_chain, nb);
7d3d43da
EB
1824 if (err)
1825 goto unlock;
1826
1827 for_each_net(net) {
1828 for_each_netdev(net, dev) {
1829 if (dev->flags & IFF_UP) {
351638e7
JP
1830 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1831 dev);
1832 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
7d3d43da 1833 }
351638e7 1834 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
7d3d43da
EB
1835 }
1836 }
1837unlock:
9f514950 1838 rtnl_unlock();
328fbe74 1839 up_write(&pernet_ops_rwsem);
9f514950 1840 return err;
1da177e4 1841}
d1b19dff 1842EXPORT_SYMBOL(unregister_netdevice_notifier);
1da177e4 1843
351638e7
JP
1844/**
1845 * call_netdevice_notifiers_info - call all network notifier blocks
1846 * @val: value passed unmodified to notifier function
351638e7
JP
1847 * @info: notifier information data
1848 *
1849 * Call all network notifier blocks. Parameters and return value
1850 * are as for raw_notifier_call_chain().
1851 */
1852
1d143d9f 1853static int call_netdevice_notifiers_info(unsigned long val,
1d143d9f 1854 struct netdev_notifier_info *info)
351638e7
JP
1855{
1856 ASSERT_RTNL();
351638e7
JP
1857 return raw_notifier_call_chain(&netdev_chain, val, info);
1858}
351638e7 1859
26372605
PM
1860static int call_netdevice_notifiers_extack(unsigned long val,
1861 struct net_device *dev,
1862 struct netlink_ext_ack *extack)
1863{
1864 struct netdev_notifier_info info = {
1865 .dev = dev,
1866 .extack = extack,
1867 };
1868
1869 return call_netdevice_notifiers_info(val, &info);
1870}
1871
1da177e4
LT
1872/**
1873 * call_netdevice_notifiers - call all network notifier blocks
1874 * @val: value passed unmodified to notifier function
c4ea43c5 1875 * @dev: net_device pointer passed unmodified to notifier function
1da177e4
LT
1876 *
1877 * Call all network notifier blocks. Parameters and return value
f07d5b94 1878 * are as for raw_notifier_call_chain().
1da177e4
LT
1879 */
1880
ad7379d4 1881int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1da177e4 1882{
26372605 1883 return call_netdevice_notifiers_extack(val, dev, NULL);
1da177e4 1884}
edf947f1 1885EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 1886
af7d6cce
SD
1887/**
1888 * call_netdevice_notifiers_mtu - call all network notifier blocks
1889 * @val: value passed unmodified to notifier function
1890 * @dev: net_device pointer passed unmodified to notifier function
1891 * @arg: additional u32 argument passed to the notifier function
1892 *
1893 * Call all network notifier blocks. Parameters and return value
1894 * are as for raw_notifier_call_chain().
1895 */
1896static int call_netdevice_notifiers_mtu(unsigned long val,
1897 struct net_device *dev, u32 arg)
1898{
1899 struct netdev_notifier_info_ext info = {
1900 .info.dev = dev,
1901 .ext.mtu = arg,
1902 };
1903
1904 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
1905
1906 return call_netdevice_notifiers_info(val, &info.info);
1907}
1908
1cf51900 1909#ifdef CONFIG_NET_INGRESS
aabf6772 1910static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
4577139b
DB
1911
1912void net_inc_ingress_queue(void)
1913{
aabf6772 1914 static_branch_inc(&ingress_needed_key);
4577139b
DB
1915}
1916EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1917
1918void net_dec_ingress_queue(void)
1919{
aabf6772 1920 static_branch_dec(&ingress_needed_key);
4577139b
DB
1921}
1922EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1923#endif
1924
1f211a1b 1925#ifdef CONFIG_NET_EGRESS
aabf6772 1926static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1f211a1b
DB
1927
1928void net_inc_egress_queue(void)
1929{
aabf6772 1930 static_branch_inc(&egress_needed_key);
1f211a1b
DB
1931}
1932EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1933
1934void net_dec_egress_queue(void)
1935{
aabf6772 1936 static_branch_dec(&egress_needed_key);
1f211a1b
DB
1937}
1938EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1939#endif
1940
39e83922 1941static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
e9666d10 1942#ifdef CONFIG_JUMP_LABEL
b90e5794 1943static atomic_t netstamp_needed_deferred;
13baa00a 1944static atomic_t netstamp_wanted;
5fa8bbda 1945static void netstamp_clear(struct work_struct *work)
1da177e4 1946{
b90e5794 1947 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
13baa00a 1948 int wanted;
b90e5794 1949
13baa00a
ED
1950 wanted = atomic_add_return(deferred, &netstamp_wanted);
1951 if (wanted > 0)
39e83922 1952 static_branch_enable(&netstamp_needed_key);
13baa00a 1953 else
39e83922 1954 static_branch_disable(&netstamp_needed_key);
5fa8bbda
ED
1955}
1956static DECLARE_WORK(netstamp_work, netstamp_clear);
b90e5794 1957#endif
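/*
 * Note on the deferred scheme above (an inference from the code, not a
 * documented contract): static_branch_enable()/disable() may sleep, while
 * net_{enable,disable}_timestamp() can be called from atomic context.
 * When the refcount cannot be adjusted directly, the delta is parked in
 * netstamp_needed_deferred and folded into netstamp_wanted from the
 * netstamp_work workqueue.
 */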
5fa8bbda
ED
1958
1959void net_enable_timestamp(void)
1960{
e9666d10 1961#ifdef CONFIG_JUMP_LABEL
13baa00a
ED
1962 int wanted;
1963
1964 while (1) {
1965 wanted = atomic_read(&netstamp_wanted);
1966 if (wanted <= 0)
1967 break;
1968 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1969 return;
1970 }
1971 atomic_inc(&netstamp_needed_deferred);
1972 schedule_work(&netstamp_work);
1973#else
39e83922 1974 static_branch_inc(&netstamp_needed_key);
13baa00a 1975#endif
1da177e4 1976}
d1b19dff 1977EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
1978
1979void net_disable_timestamp(void)
1980{
e9666d10 1981#ifdef CONFIG_JUMP_LABEL
13baa00a
ED
1982 int wanted;
1983
1984 while (1) {
1985 wanted = atomic_read(&netstamp_wanted);
1986 if (wanted <= 1)
1987 break;
1988 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1989 return;
1990 }
1991 atomic_dec(&netstamp_needed_deferred);
5fa8bbda
ED
1992 schedule_work(&netstamp_work);
1993#else
39e83922 1994 static_branch_dec(&netstamp_needed_key);
5fa8bbda 1995#endif
1da177e4 1996}
d1b19dff 1997EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1998
3b098e2d 1999static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 2000{
2456e855 2001 skb->tstamp = 0;
39e83922 2002 if (static_branch_unlikely(&netstamp_needed_key))
a61bbcf2 2003 __net_timestamp(skb);
1da177e4
LT
2004}
2005
39e83922
DB
2006#define net_timestamp_check(COND, SKB) \
2007 if (static_branch_unlikely(&netstamp_needed_key)) { \
2008 if ((COND) && !(SKB)->tstamp) \
2009 __net_timestamp(SKB); \
2010 } \
3b098e2d 2011
f4b05d27 2012bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
79b569f0
DL
2013{
2014 unsigned int len;
2015
2016 if (!(dev->flags & IFF_UP))
2017 return false;
2018
2019 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
2020 if (skb->len <= len)
2021 return true;
2022
2023 /* if TSO is enabled, we don't care about the length as the packet
 2024	 * could be forwarded without being segmented first
2025 */
2026 if (skb_is_gso(skb))
2027 return true;
2028
2029 return false;
2030}
1ee481fb 2031EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 2032
a0265d28
HX
2033int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2034{
4e3264d2 2035 int ret = ____dev_forward_skb(dev, skb);
a0265d28 2036
4e3264d2
MKL
2037 if (likely(!ret)) {
2038 skb->protocol = eth_type_trans(skb, dev);
2039 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2040 }
a0265d28 2041
4e3264d2 2042 return ret;
a0265d28
HX
2043}
2044EXPORT_SYMBOL_GPL(__dev_forward_skb);
2045
44540960
AB
2046/**
2047 * dev_forward_skb - loopback an skb to another netif
2048 *
2049 * @dev: destination network device
2050 * @skb: buffer to forward
2051 *
2052 * return values:
2053 * NET_RX_SUCCESS (no congestion)
6ec82562 2054 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
2055 *
2056 * dev_forward_skb can be used for injecting an skb from the
2057 * start_xmit function of one device into the receive queue
2058 * of another device.
2059 *
2060 * The receiving device may be in another namespace, so
2061 * we have to clear all information in the skb that could
2062 * impact namespace isolation.
2063 */
2064int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2065{
a0265d28 2066 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
2067}
2068EXPORT_SYMBOL_GPL(dev_forward_skb);
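/*
 * Example: a hypothetical veth-style pair could loop transmitted packets
 * into its peer from ndo_start_xmit(). Sketch only; my_get_peer() is an
 * assumed helper:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */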
2069
71d9dec2
CG
2070static inline int deliver_skb(struct sk_buff *skb,
2071 struct packet_type *pt_prev,
2072 struct net_device *orig_dev)
2073{
1f8b977a 2074 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1080e512 2075 return -ENOMEM;
63354797 2076 refcount_inc(&skb->users);
71d9dec2
CG
2077 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2078}
2079
7866a621
SN
2080static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2081 struct packet_type **pt,
fbcb2170
JP
2082 struct net_device *orig_dev,
2083 __be16 type,
7866a621
SN
2084 struct list_head *ptype_list)
2085{
2086 struct packet_type *ptype, *pt_prev = *pt;
2087
2088 list_for_each_entry_rcu(ptype, ptype_list, list) {
2089 if (ptype->type != type)
2090 continue;
2091 if (pt_prev)
fbcb2170 2092 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
2093 pt_prev = ptype;
2094 }
2095 *pt = pt_prev;
2096}
2097
c0de08d0
EL
2098static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2099{
a3d744e9 2100 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
2101 return false;
2102
2103 if (ptype->id_match)
2104 return ptype->id_match(ptype, skb->sk);
2105 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2106 return true;
2107
2108 return false;
2109}
2110
9f9a742d
MR
2111/**
2112 * dev_nit_active - return true if any network interface taps are in use
2113 *
2114 * @dev: network device to check for the presence of taps
2115 */
2116bool dev_nit_active(struct net_device *dev)
2117{
2118 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2119}
2120EXPORT_SYMBOL_GPL(dev_nit_active);
2121
1da177e4
LT
2122/*
2123 * Support routine. Sends outgoing frames to any network
2124 * taps currently in use.
2125 */
2126
74b20582 2127void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
2128{
2129 struct packet_type *ptype;
71d9dec2
CG
2130 struct sk_buff *skb2 = NULL;
2131 struct packet_type *pt_prev = NULL;
7866a621 2132 struct list_head *ptype_list = &ptype_all;
a61bbcf2 2133
1da177e4 2134 rcu_read_lock();
7866a621
SN
2135again:
2136 list_for_each_entry_rcu(ptype, ptype_list, list) {
fa788d98
VW
2137 if (ptype->ignore_outgoing)
2138 continue;
2139
1da177e4
LT
2140 /* Never send packets back to the socket
2141 * they originated from - MvS (miquels@drinkel.ow.org)
2142 */
7866a621
SN
2143 if (skb_loop_sk(ptype, skb))
2144 continue;
71d9dec2 2145
7866a621
SN
2146 if (pt_prev) {
2147 deliver_skb(skb2, pt_prev, skb->dev);
2148 pt_prev = ptype;
2149 continue;
2150 }
1da177e4 2151
7866a621
SN
2152 /* need to clone skb, done only once */
2153 skb2 = skb_clone(skb, GFP_ATOMIC);
2154 if (!skb2)
2155 goto out_unlock;
70978182 2156
7866a621 2157 net_timestamp_set(skb2);
1da177e4 2158
7866a621
SN
2159 /* skb->nh should be correctly
 2160		 * set by the sender, so that the second statement is
2161 * just protection against buggy protocols.
2162 */
2163 skb_reset_mac_header(skb2);
2164
2165 if (skb_network_header(skb2) < skb2->data ||
2166 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2167 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2168 ntohs(skb2->protocol),
2169 dev->name);
2170 skb_reset_network_header(skb2);
1da177e4 2171 }
7866a621
SN
2172
2173 skb2->transport_header = skb2->network_header;
2174 skb2->pkt_type = PACKET_OUTGOING;
2175 pt_prev = ptype;
2176 }
2177
2178 if (ptype_list == &ptype_all) {
2179 ptype_list = &dev->ptype_all;
2180 goto again;
1da177e4 2181 }
7866a621 2182out_unlock:
581fe0ea
WB
2183 if (pt_prev) {
2184 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2185 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2186 else
2187 kfree_skb(skb2);
2188 }
1da177e4
LT
2189 rcu_read_unlock();
2190}
74b20582 2191EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
1da177e4 2192
2c53040f
BH
2193/**
2194 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
2195 * @dev: Network device
2196 * @txq: number of queues available
2197 *
 2198 *	If real_num_tx_queues is changed the tc mappings may no longer be
 2199 *	valid. To resolve this, verify the tc mapping remains valid and,
 2200 *	if not, NULL the mapping. With no priorities mapping to this
 2201 *	offset/count pair it will no longer be used. In the worst case, TC0
 2202 *	is invalid and nothing can be done, so disable priority mappings. It is
 2203 *	expected that drivers will fix this mapping if they can before
 2204 *	calling netif_set_real_num_tx_queues.
2205 */
bb134d22 2206static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
2207{
2208 int i;
2209 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2210
 2211	/* If TC0 is invalidated, disable TC mapping */
2212 if (tc->offset + tc->count > txq) {
7b6cd1ce 2213 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
2214 dev->num_tc = 0;
2215 return;
2216 }
2217
 2218	/* Reset invalidated prio-to-tc mappings to TC0 */
2219 for (i = 1; i < TC_BITMASK + 1; i++) {
2220 int q = netdev_get_prio_tc_map(dev, i);
2221
2222 tc = &dev->tc_to_txq[q];
2223 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
2224 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2225 i, q);
4f57c087
JF
2226 netdev_set_prio_tc_map(dev, i, 0);
2227 }
2228 }
2229}
2230
8d059b0f
AD
2231int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2232{
2233 if (dev->num_tc) {
2234 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2235 int i;
2236
ffcfe25b 2237 /* walk through the TCs and see if it falls into any of them */
8d059b0f
AD
2238 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2239 if ((txq - tc->offset) < tc->count)
2240 return i;
2241 }
2242
ffcfe25b 2243 /* didn't find it, just return -1 to indicate no match */
8d059b0f
AD
2244 return -1;
2245 }
2246
2247 return 0;
2248}
8a5f2166 2249EXPORT_SYMBOL(netdev_txq_to_tc);
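/*
 * Worked example (illustrative numbers only): with num_tc = 2 and
 * tc_to_txq = { { .offset = 0, .count = 4 }, { .offset = 4, .count = 4 } },
 * txq 5 satisfies (5 - 4) < 4 for the second entry, so
 * netdev_txq_to_tc(dev, 5) returns 1, while txq 9 matches no entry and
 * yields -1.
 */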
8d059b0f 2250
537c00de 2251#ifdef CONFIG_XPS
04157469
AN
2252struct static_key xps_needed __read_mostly;
2253EXPORT_SYMBOL(xps_needed);
2254struct static_key xps_rxqs_needed __read_mostly;
2255EXPORT_SYMBOL(xps_rxqs_needed);
537c00de
AD
2256static DEFINE_MUTEX(xps_map_mutex);
2257#define xmap_dereference(P) \
2258 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2259
6234f874
AD
2260static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2261 int tci, u16 index)
537c00de 2262{
10cdc3f3
AD
2263 struct xps_map *map = NULL;
2264 int pos;
537c00de 2265
10cdc3f3 2266 if (dev_maps)
80d19669 2267 map = xmap_dereference(dev_maps->attr_map[tci]);
6234f874
AD
2268 if (!map)
2269 return false;
537c00de 2270
6234f874
AD
2271 for (pos = map->len; pos--;) {
2272 if (map->queues[pos] != index)
2273 continue;
2274
2275 if (map->len > 1) {
2276 map->queues[pos] = map->queues[--map->len];
10cdc3f3 2277 break;
537c00de 2278 }
6234f874 2279
80d19669 2280 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
6234f874
AD
2281 kfree_rcu(map, rcu);
2282 return false;
537c00de
AD
2283 }
2284
6234f874 2285 return true;
10cdc3f3
AD
2286}
2287
6234f874
AD
2288static bool remove_xps_queue_cpu(struct net_device *dev,
2289 struct xps_dev_maps *dev_maps,
2290 int cpu, u16 offset, u16 count)
2291{
184c449f
AD
2292 int num_tc = dev->num_tc ? : 1;
2293 bool active = false;
2294 int tci;
6234f874 2295
184c449f
AD
2296 for (tci = cpu * num_tc; num_tc--; tci++) {
2297 int i, j;
2298
2299 for (i = count, j = offset; i--; j++) {
6358d49a 2300 if (!remove_xps_queue(dev_maps, tci, j))
184c449f
AD
2301 break;
2302 }
2303
2304 active |= i < 0;
6234f874
AD
2305 }
2306
184c449f 2307 return active;
6234f874
AD
2308}
2309
867d0ad4
SD
2310static void reset_xps_maps(struct net_device *dev,
2311 struct xps_dev_maps *dev_maps,
2312 bool is_rxqs_map)
2313{
2314 if (is_rxqs_map) {
2315 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2316 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2317 } else {
2318 RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2319 }
2320 static_key_slow_dec_cpuslocked(&xps_needed);
2321 kfree_rcu(dev_maps, rcu);
2322}
2323
80d19669
AN
2324static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2325 struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2326 u16 offset, u16 count, bool is_rxqs_map)
2327{
2328 bool active = false;
2329 int i, j;
2330
2331 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2332 j < nr_ids;)
2333 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2334 count);
867d0ad4
SD
2335 if (!active)
2336 reset_xps_maps(dev, dev_maps, is_rxqs_map);
80d19669 2337
f28c020f
SD
2338 if (!is_rxqs_map) {
2339 for (i = offset + (count - 1); count--; i--) {
2340 netdev_queue_numa_node_write(
2341 netdev_get_tx_queue(dev, i),
2342 NUMA_NO_NODE);
80d19669 2343 }
80d19669
AN
2344 }
2345}
2346
6234f874
AD
2347static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2348 u16 count)
10cdc3f3 2349{
80d19669 2350 const unsigned long *possible_mask = NULL;
10cdc3f3 2351 struct xps_dev_maps *dev_maps;
80d19669 2352 unsigned int nr_ids;
10cdc3f3 2353
04157469
AN
2354 if (!static_key_false(&xps_needed))
2355 return;
10cdc3f3 2356
4d99f660 2357 cpus_read_lock();
04157469 2358 mutex_lock(&xps_map_mutex);
10cdc3f3 2359
04157469
AN
2360 if (static_key_false(&xps_rxqs_needed)) {
2361 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2362 if (dev_maps) {
2363 nr_ids = dev->num_rx_queues;
2364 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2365 offset, count, true);
2366 }
537c00de
AD
2367 }
2368
80d19669
AN
2369 dev_maps = xmap_dereference(dev->xps_cpus_map);
2370 if (!dev_maps)
2371 goto out_no_maps;
2372
2373 if (num_possible_cpus() > 1)
2374 possible_mask = cpumask_bits(cpu_possible_mask);
2375 nr_ids = nr_cpu_ids;
2376 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2377 false);
024e9679 2378
537c00de
AD
2379out_no_maps:
2380 mutex_unlock(&xps_map_mutex);
4d99f660 2381 cpus_read_unlock();
537c00de
AD
2382}
2383
6234f874
AD
2384static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2385{
2386 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2387}
2388
80d19669
AN
2389static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2390 u16 index, bool is_rxqs_map)
01c5f864
AD
2391{
2392 struct xps_map *new_map;
2393 int alloc_len = XPS_MIN_MAP_ALLOC;
2394 int i, pos;
2395
2396 for (pos = 0; map && pos < map->len; pos++) {
2397 if (map->queues[pos] != index)
2398 continue;
2399 return map;
2400 }
2401
80d19669 2402 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
01c5f864
AD
2403 if (map) {
2404 if (pos < map->alloc_len)
2405 return map;
2406
2407 alloc_len = map->alloc_len * 2;
2408 }
2409
80d19669
AN
 2410	/* Need to allocate a new map to store the tx-queue in this
 2411	 * CPU's/rx-queue's map
 2412	 */
2413 if (is_rxqs_map)
2414 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2415 else
2416 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2417 cpu_to_node(attr_index));
01c5f864
AD
2418 if (!new_map)
2419 return NULL;
2420
2421 for (i = 0; i < pos; i++)
2422 new_map->queues[i] = map->queues[i];
2423 new_map->alloc_len = alloc_len;
2424 new_map->len = pos;
2425
2426 return new_map;
2427}
2428
4d99f660 2429/* Must be called under cpus_read_lock */
80d19669
AN
2430int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2431 u16 index, bool is_rxqs_map)
537c00de 2432{
80d19669 2433 const unsigned long *online_mask = NULL, *possible_mask = NULL;
01c5f864 2434 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
80d19669 2435 int i, j, tci, numa_node_id = -2;
184c449f 2436 int maps_sz, num_tc = 1, tc = 0;
537c00de 2437 struct xps_map *map, *new_map;
01c5f864 2438 bool active = false;
80d19669 2439 unsigned int nr_ids;
537c00de 2440
184c449f 2441 if (dev->num_tc) {
ffcfe25b 2442 /* Do not allow XPS on subordinate device directly */
184c449f 2443 num_tc = dev->num_tc;
ffcfe25b
AD
2444 if (num_tc < 0)
2445 return -EINVAL;
2446
2447 /* If queue belongs to subordinate dev use its map */
2448 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2449
184c449f
AD
2450 tc = netdev_txq_to_tc(dev, index);
2451 if (tc < 0)
2452 return -EINVAL;
2453 }
2454
537c00de 2455 mutex_lock(&xps_map_mutex);
80d19669
AN
2456 if (is_rxqs_map) {
2457 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2458 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2459 nr_ids = dev->num_rx_queues;
2460 } else {
2461 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2462 if (num_possible_cpus() > 1) {
2463 online_mask = cpumask_bits(cpu_online_mask);
2464 possible_mask = cpumask_bits(cpu_possible_mask);
2465 }
2466 dev_maps = xmap_dereference(dev->xps_cpus_map);
2467 nr_ids = nr_cpu_ids;
2468 }
537c00de 2469
80d19669
AN
2470 if (maps_sz < L1_CACHE_BYTES)
2471 maps_sz = L1_CACHE_BYTES;
537c00de 2472
01c5f864 2473 /* allocate memory for queue storage */
80d19669
AN
2474 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2475 j < nr_ids;) {
01c5f864
AD
2476 if (!new_dev_maps)
2477 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2478 if (!new_dev_maps) {
2479 mutex_unlock(&xps_map_mutex);
01c5f864 2480 return -ENOMEM;
2bb60cb9 2481 }
01c5f864 2482
80d19669
AN
2483 tci = j * num_tc + tc;
2484 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
01c5f864
AD
2485 NULL;
2486
80d19669 2487 map = expand_xps_map(map, j, index, is_rxqs_map);
01c5f864
AD
2488 if (!map)
2489 goto error;
2490
80d19669 2491 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
01c5f864
AD
2492 }
2493
2494 if (!new_dev_maps)
2495 goto out_no_new_maps;
2496
867d0ad4
SD
2497 if (!dev_maps) {
2498 /* Increment static keys at most once per type */
2499 static_key_slow_inc_cpuslocked(&xps_needed);
2500 if (is_rxqs_map)
2501 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2502 }
04157469 2503
80d19669
AN
2504 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2505 j < nr_ids;) {
184c449f 2506 /* copy maps belonging to foreign traffic classes */
80d19669 2507 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
184c449f 2508 /* fill in the new device map from the old device map */
80d19669
AN
2509 map = xmap_dereference(dev_maps->attr_map[tci]);
2510 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
184c449f
AD
2511 }
2512
 2513		/* We need to explicitly update tci as the previous loop
2514 * could break out early if dev_maps is NULL.
2515 */
80d19669 2516 tci = j * num_tc + tc;
184c449f 2517
80d19669
AN
2518 if (netif_attr_test_mask(j, mask, nr_ids) &&
2519 netif_attr_test_online(j, online_mask, nr_ids)) {
2520 /* add tx-queue to CPU/rx-queue maps */
01c5f864
AD
2521 int pos = 0;
2522
80d19669 2523 map = xmap_dereference(new_dev_maps->attr_map[tci]);
01c5f864
AD
2524 while ((pos < map->len) && (map->queues[pos] != index))
2525 pos++;
2526
2527 if (pos == map->len)
2528 map->queues[map->len++] = index;
537c00de 2529#ifdef CONFIG_NUMA
80d19669
AN
2530 if (!is_rxqs_map) {
2531 if (numa_node_id == -2)
2532 numa_node_id = cpu_to_node(j);
2533 else if (numa_node_id != cpu_to_node(j))
2534 numa_node_id = -1;
2535 }
537c00de 2536#endif
01c5f864
AD
2537 } else if (dev_maps) {
2538 /* fill in the new device map from the old device map */
80d19669
AN
2539 map = xmap_dereference(dev_maps->attr_map[tci]);
2540 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
537c00de 2541 }
01c5f864 2542
184c449f
AD
2543 /* copy maps belonging to foreign traffic classes */
2544 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2545 /* fill in the new device map from the old device map */
80d19669
AN
2546 map = xmap_dereference(dev_maps->attr_map[tci]);
2547 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
184c449f 2548 }
537c00de
AD
2549 }
2550
80d19669
AN
2551 if (is_rxqs_map)
2552 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2553 else
2554 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
01c5f864 2555
537c00de 2556 /* Cleanup old maps */
184c449f
AD
2557 if (!dev_maps)
2558 goto out_no_old_maps;
2559
80d19669
AN
2560 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2561 j < nr_ids;) {
2562 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2563 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2564 map = xmap_dereference(dev_maps->attr_map[tci]);
01c5f864
AD
2565 if (map && map != new_map)
2566 kfree_rcu(map, rcu);
2567 }
537c00de
AD
2568 }
2569
184c449f
AD
2570 kfree_rcu(dev_maps, rcu);
2571
2572out_no_old_maps:
01c5f864
AD
2573 dev_maps = new_dev_maps;
2574 active = true;
537c00de 2575
01c5f864 2576out_no_new_maps:
80d19669
AN
2577 if (!is_rxqs_map) {
2578 /* update Tx queue numa node */
2579 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2580 (numa_node_id >= 0) ?
2581 numa_node_id : NUMA_NO_NODE);
2582 }
537c00de 2583
01c5f864
AD
2584 if (!dev_maps)
2585 goto out_no_maps;
2586
80d19669
AN
2587 /* removes tx-queue from unused CPUs/rx-queues */
2588 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2589 j < nr_ids;) {
2590 for (i = tc, tci = j * num_tc; i--; tci++)
184c449f 2591 active |= remove_xps_queue(dev_maps, tci, index);
80d19669
AN
2592 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2593 !netif_attr_test_online(j, online_mask, nr_ids))
184c449f
AD
2594 active |= remove_xps_queue(dev_maps, tci, index);
2595 for (i = num_tc - tc, tci++; --i; tci++)
2596 active |= remove_xps_queue(dev_maps, tci, index);
01c5f864
AD
2597 }
2598
2599 /* free map if not active */
867d0ad4
SD
2600 if (!active)
2601 reset_xps_maps(dev, dev_maps, is_rxqs_map);
01c5f864
AD
2602
2603out_no_maps:
537c00de
AD
2604 mutex_unlock(&xps_map_mutex);
2605
2606 return 0;
2607error:
01c5f864 2608 /* remove any maps that we added */
80d19669
AN
2609 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2610 j < nr_ids;) {
2611 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2612 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
184c449f 2613 map = dev_maps ?
80d19669 2614 xmap_dereference(dev_maps->attr_map[tci]) :
184c449f
AD
2615 NULL;
2616 if (new_map && new_map != map)
2617 kfree(new_map);
2618 }
01c5f864
AD
2619 }
2620
537c00de
AD
2621 mutex_unlock(&xps_map_mutex);
2622
537c00de
AD
2623 kfree(new_dev_maps);
2624 return -ENOMEM;
2625}
4d99f660 2626EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
80d19669
AN
2627
2628int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2629 u16 index)
2630{
4d99f660
AV
2631 int ret;
2632
2633 cpus_read_lock();
2634 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2635 cpus_read_unlock();
2636
2637 return ret;
80d19669 2638}
537c00de
AD
2639EXPORT_SYMBOL(netif_set_xps_queue);
2640
2641#endif
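/*
 * Example: a hypothetical driver pinning Tx queue 0 to CPUs 0 and 1 at
 * setup time (sketch; only meaningful with CONFIG_XPS enabled):
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = netif_set_xps_queue(dev, mask, 0);
 *	free_cpumask_var(mask);
 */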
ffcfe25b
AD
2642static void netdev_unbind_all_sb_channels(struct net_device *dev)
2643{
2644 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2645
2646 /* Unbind any subordinate channels */
2647 while (txq-- != &dev->_tx[0]) {
2648 if (txq->sb_dev)
2649 netdev_unbind_sb_channel(dev, txq->sb_dev);
2650 }
2651}
2652
9cf1f6a8
AD
2653void netdev_reset_tc(struct net_device *dev)
2654{
6234f874
AD
2655#ifdef CONFIG_XPS
2656 netif_reset_xps_queues_gt(dev, 0);
2657#endif
ffcfe25b
AD
2658 netdev_unbind_all_sb_channels(dev);
2659
2660 /* Reset TC configuration of device */
9cf1f6a8
AD
2661 dev->num_tc = 0;
2662 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2663 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2664}
2665EXPORT_SYMBOL(netdev_reset_tc);
2666
2667int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2668{
2669 if (tc >= dev->num_tc)
2670 return -EINVAL;
2671
6234f874
AD
2672#ifdef CONFIG_XPS
2673 netif_reset_xps_queues(dev, offset, count);
2674#endif
9cf1f6a8
AD
2675 dev->tc_to_txq[tc].count = count;
2676 dev->tc_to_txq[tc].offset = offset;
2677 return 0;
2678}
2679EXPORT_SYMBOL(netdev_set_tc_queue);
2680
2681int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2682{
2683 if (num_tc > TC_MAX_QUEUE)
2684 return -EINVAL;
2685
6234f874
AD
2686#ifdef CONFIG_XPS
2687 netif_reset_xps_queues_gt(dev, 0);
2688#endif
ffcfe25b
AD
2689 netdev_unbind_all_sb_channels(dev);
2690
9cf1f6a8
AD
2691 dev->num_tc = num_tc;
2692 return 0;
2693}
2694EXPORT_SYMBOL(netdev_set_num_tc);
2695
ffcfe25b
AD
2696void netdev_unbind_sb_channel(struct net_device *dev,
2697 struct net_device *sb_dev)
2698{
2699 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2700
2701#ifdef CONFIG_XPS
2702 netif_reset_xps_queues_gt(sb_dev, 0);
2703#endif
2704 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2705 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2706
2707 while (txq-- != &dev->_tx[0]) {
2708 if (txq->sb_dev == sb_dev)
2709 txq->sb_dev = NULL;
2710 }
2711}
2712EXPORT_SYMBOL(netdev_unbind_sb_channel);
2713
2714int netdev_bind_sb_channel_queue(struct net_device *dev,
2715 struct net_device *sb_dev,
2716 u8 tc, u16 count, u16 offset)
2717{
2718 /* Make certain the sb_dev and dev are already configured */
2719 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2720 return -EINVAL;
2721
2722 /* We cannot hand out queues we don't have */
2723 if ((offset + count) > dev->real_num_tx_queues)
2724 return -EINVAL;
2725
2726 /* Record the mapping */
2727 sb_dev->tc_to_txq[tc].count = count;
2728 sb_dev->tc_to_txq[tc].offset = offset;
2729
2730 /* Provide a way for Tx queue to find the tc_to_txq map or
2731 * XPS map for itself.
2732 */
2733 while (count--)
2734 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2735
2736 return 0;
2737}
2738EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2739
2740int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2741{
2742 /* Do not use a multiqueue device to represent a subordinate channel */
2743 if (netif_is_multiqueue(dev))
2744 return -ENODEV;
2745
2746 /* We allow channels 1 - 32767 to be used for subordinate channels.
2747 * Channel 0 is meant to be "native" mode and used only to represent
2748 * the main root device. We allow writing 0 to reset the device back
2749 * to normal mode after being used as a subordinate channel.
2750 */
2751 if (channel > S16_MAX)
2752 return -EINVAL;
2753
2754 dev->num_tc = -channel;
2755
2756 return 0;
2757}
2758EXPORT_SYMBOL(netdev_set_sb_channel);
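/*
 * Example: a hypothetical sketch of wiring up a subordinate device,
 * assuming the lower device already has traffic classes configured
 * (dev->num_tc > 0) and enough real Tx queues. Channel 1 maps the sb
 * device's tc 0 onto four of the lower device's queues starting at
 * offset 8:
 *
 *	err = netdev_set_sb_channel(sb_dev, 1);
 *	if (!err)
 *		err = netdev_bind_sb_channel_queue(dev, sb_dev, 0, 4, 8);
 */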
2759
f0796d5c
JF
2760/*
2761 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
3a053b1a 2762 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
f0796d5c 2763 */
e6484930 2764int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2765{
ac5b7019 2766 bool disabling;
1d24eb48
TH
2767 int rc;
2768
ac5b7019
JK
2769 disabling = txq < dev->real_num_tx_queues;
2770
e6484930
TH
2771 if (txq < 1 || txq > dev->num_tx_queues)
2772 return -EINVAL;
f0796d5c 2773
5c56580b
BH
2774 if (dev->reg_state == NETREG_REGISTERED ||
2775 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2776 ASSERT_RTNL();
2777
1d24eb48
TH
2778 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2779 txq);
bf264145
TH
2780 if (rc)
2781 return rc;
2782
4f57c087
JF
2783 if (dev->num_tc)
2784 netif_setup_tc(dev, txq);
2785
ac5b7019
JK
2786 dev->real_num_tx_queues = txq;
2787
2788 if (disabling) {
2789 synchronize_net();
e6484930 2790 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2791#ifdef CONFIG_XPS
2792 netif_reset_xps_queues_gt(dev, txq);
2793#endif
2794 }
ac5b7019
JK
2795 } else {
2796 dev->real_num_tx_queues = txq;
f0796d5c 2797 }
e6484930 2798
e6484930 2799 return 0;
f0796d5c
JF
2800}
2801EXPORT_SYMBOL(netif_set_real_num_tx_queues);
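/*
 * Example: a hypothetical multiqueue driver shrinking its active queue
 * set after an ethtool channel change; once the device is registered the
 * call must run under RTNL, and new_cnt must not exceed num_tx_queues:
 *
 *	err = netif_set_real_num_tx_queues(dev, new_cnt);
 *	if (err)
 *		return err;
 */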
56079431 2802
a953be53 2803#ifdef CONFIG_SYSFS
62fe0b40
BH
2804/**
2805 * netif_set_real_num_rx_queues - set actual number of RX queues used
2806 * @dev: Network device
2807 * @rxq: Actual number of RX queues
2808 *
2809 * This must be called either with the rtnl_lock held or before
2810 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2811 * negative error code. If called before registration, it always
2812 * succeeds.
62fe0b40
BH
2813 */
2814int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2815{
2816 int rc;
2817
bd25fa7b
TH
2818 if (rxq < 1 || rxq > dev->num_rx_queues)
2819 return -EINVAL;
2820
62fe0b40
BH
2821 if (dev->reg_state == NETREG_REGISTERED) {
2822 ASSERT_RTNL();
2823
62fe0b40
BH
2824 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2825 rxq);
2826 if (rc)
2827 return rc;
62fe0b40
BH
2828 }
2829
2830 dev->real_num_rx_queues = rxq;
2831 return 0;
2832}
2833EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2834#endif
2835
2c53040f
BH
2836/**
2837 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2838 *
2839 * This routine should set an upper limit on the number of RSS queues
2840 * used by default by multiqueue devices.
2841 */
a55b138b 2842int netif_get_num_default_rss_queues(void)
16917b87 2843{
40e4e713
HS
2844 return is_kdump_kernel() ?
2845 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
16917b87
YM
2846}
2847EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2848
3bcb846c 2849static void __netif_reschedule(struct Qdisc *q)
56079431 2850{
def82a1d
JP
2851 struct softnet_data *sd;
2852 unsigned long flags;
56079431 2853
def82a1d 2854 local_irq_save(flags);
903ceff7 2855 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2856 q->next_sched = NULL;
2857 *sd->output_queue_tailp = q;
2858 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2859 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2860 local_irq_restore(flags);
2861}
2862
2863void __netif_schedule(struct Qdisc *q)
2864{
2865 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2866 __netif_reschedule(q);
56079431
DV
2867}
2868EXPORT_SYMBOL(__netif_schedule);
2869
e6247027
ED
2870struct dev_kfree_skb_cb {
2871 enum skb_free_reason reason;
2872};
2873
2874static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2875{
e6247027
ED
2876 return (struct dev_kfree_skb_cb *)skb->cb;
2877}
2878
46e5da40
JF
2879void netif_schedule_queue(struct netdev_queue *txq)
2880{
2881 rcu_read_lock();
5be5515a 2882 if (!netif_xmit_stopped(txq)) {
46e5da40
JF
2883 struct Qdisc *q = rcu_dereference(txq->qdisc);
2884
2885 __netif_schedule(q);
2886 }
2887 rcu_read_unlock();
2888}
2889EXPORT_SYMBOL(netif_schedule_queue);
2890
46e5da40
JF
2891void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2892{
2893 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2894 struct Qdisc *q;
2895
2896 rcu_read_lock();
2897 q = rcu_dereference(dev_queue->qdisc);
2898 __netif_schedule(q);
2899 rcu_read_unlock();
2900 }
2901}
2902EXPORT_SYMBOL(netif_tx_wake_queue);
2903
e6247027 2904void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2905{
e6247027 2906 unsigned long flags;
56079431 2907
9899886d
MJ
2908 if (unlikely(!skb))
2909 return;
2910
63354797 2911 if (likely(refcount_read(&skb->users) == 1)) {
e6247027 2912 smp_rmb();
63354797
RE
2913 refcount_set(&skb->users, 0);
2914 } else if (likely(!refcount_dec_and_test(&skb->users))) {
e6247027 2915 return;
bea3348e 2916 }
e6247027
ED
2917 get_kfree_skb_cb(skb)->reason = reason;
2918 local_irq_save(flags);
2919 skb->next = __this_cpu_read(softnet_data.completion_queue);
2920 __this_cpu_write(softnet_data.completion_queue, skb);
2921 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2922 local_irq_restore(flags);
56079431 2923}
e6247027 2924EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2925
e6247027 2926void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2927{
2928 if (in_irq() || irqs_disabled())
e6247027 2929 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2930 else
2931 dev_kfree_skb(skb);
2932}
e6247027 2933EXPORT_SYMBOL(__dev_kfree_skb_any);
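/*
 * Example: the dev_consume_skb_any()/dev_kfree_skb_any() wrappers built
 * on the helper above are what a driver typically uses on a Tx completion
 * path that may run in hard IRQ or process context. A hypothetical ISR
 * sketch (struct my_priv and its tx_skb field are assumptions):
 *
 *	static irqreturn_t my_tx_done_irq(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		dev_consume_skb_any(priv->tx_skb);
 *		priv->tx_skb = NULL;
 *		return IRQ_HANDLED;
 *	}
 */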
56079431
DV
2934
2935
bea3348e
SH
2936/**
2937 * netif_device_detach - mark device as removed
2938 * @dev: network device
2939 *
2940 * Mark device as removed from system and therefore no longer available.
2941 */
56079431
DV
2942void netif_device_detach(struct net_device *dev)
2943{
2944 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2945 netif_running(dev)) {
d543103a 2946 netif_tx_stop_all_queues(dev);
56079431
DV
2947 }
2948}
2949EXPORT_SYMBOL(netif_device_detach);
2950
bea3348e
SH
2951/**
2952 * netif_device_attach - mark device as attached
2953 * @dev: network device
2954 *
2955 * Mark device as attached from system and restart if needed.
2956 */
56079431
DV
2957void netif_device_attach(struct net_device *dev)
2958{
2959 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2960 netif_running(dev)) {
d543103a 2961 netif_tx_wake_all_queues(dev);
4ec93edb 2962 __netdev_watchdog_up(dev);
56079431
DV
2963 }
2964}
2965EXPORT_SYMBOL(netif_device_attach);
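/*
 * Example: the detach/attach pair above is the usual bracket around
 * suspend/resume in a hypothetical driver sketch:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */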
2966
5605c762
JP
2967/*
 2968 * Returns a Tx hash based on the given packet descriptor and the number
 2969 * of Tx queues to be used as a distribution range.
2970 */
eadec877
AD
2971static u16 skb_tx_hash(const struct net_device *dev,
2972 const struct net_device *sb_dev,
2973 struct sk_buff *skb)
5605c762
JP
2974{
2975 u32 hash;
2976 u16 qoffset = 0;
1b837d48 2977 u16 qcount = dev->real_num_tx_queues;
5605c762 2978
eadec877
AD
2979 if (dev->num_tc) {
2980 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2981
2982 qoffset = sb_dev->tc_to_txq[tc].offset;
2983 qcount = sb_dev->tc_to_txq[tc].count;
2984 }
2985
5605c762
JP
2986 if (skb_rx_queue_recorded(skb)) {
2987 hash = skb_get_rx_queue(skb);
1b837d48
AD
2988 while (unlikely(hash >= qcount))
2989 hash -= qcount;
eadec877 2990 return hash + qoffset;
5605c762
JP
2991 }
2992
2993 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2994}
5605c762 2995
36c92474
BH
2996static void skb_warn_bad_offload(const struct sk_buff *skb)
2997{
84d15ae5 2998 static const netdev_features_t null_features;
36c92474 2999 struct net_device *dev = skb->dev;
88ad4175 3000 const char *name = "";
36c92474 3001
c846ad9b
BG
3002 if (!net_ratelimit())
3003 return;
3004
88ad4175
BM
3005 if (dev) {
3006 if (dev->dev.parent)
3007 name = dev_driver_string(dev->dev.parent);
3008 else
3009 name = netdev_name(dev);
3010 }
6413139d
WB
3011 skb_dump(KERN_WARNING, skb, false);
3012 WARN(1, "%s: caps=(%pNF, %pNF)\n",
88ad4175 3013 name, dev ? &dev->features : &null_features,
6413139d 3014 skb->sk ? &skb->sk->sk_route_caps : &null_features);
36c92474
BH
3015}
3016
1da177e4
LT
3017/*
3018 * Invalidate hardware checksum when packet is to be mangled, and
3019 * complete checksum manually on outgoing path.
3020 */
84fa7933 3021int skb_checksum_help(struct sk_buff *skb)
1da177e4 3022{
d3bc23e7 3023 __wsum csum;
663ead3b 3024 int ret = 0, offset;
1da177e4 3025
84fa7933 3026 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
3027 goto out_set_summed;
3028
3029 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
3030 skb_warn_bad_offload(skb);
3031 return -EINVAL;
1da177e4
LT
3032 }
3033
cef401de
ED
3034 /* Before computing a checksum, we should make sure no frag could
3035 * be modified by an external entity : checksum could be wrong.
3036 */
3037 if (skb_has_shared_frag(skb)) {
3038 ret = __skb_linearize(skb);
3039 if (ret)
3040 goto out;
3041 }
3042
55508d60 3043 offset = skb_checksum_start_offset(skb);
a030847e
HX
3044 BUG_ON(offset >= skb_headlen(skb));
3045 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3046
3047 offset += skb->csum_offset;
3048 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3049
3050 if (skb_cloned(skb) &&
3051 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
3052 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3053 if (ret)
3054 goto out;
3055 }
3056
4f2e4ad5 3057 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
a430a43d 3058out_set_summed:
1da177e4 3059 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 3060out:
1da177e4
LT
3061 return ret;
3062}
d1b19dff 3063EXPORT_SYMBOL(skb_checksum_help);
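/*
 * Example: a hypothetical driver without hardware checksum offload can
 * fall back to this helper in its xmit path (sketch):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */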
1da177e4 3064
b72b5bf6
DC
3065int skb_crc32c_csum_help(struct sk_buff *skb)
3066{
3067 __le32 crc32c_csum;
3068 int ret = 0, offset, start;
3069
3070 if (skb->ip_summed != CHECKSUM_PARTIAL)
3071 goto out;
3072
3073 if (unlikely(skb_is_gso(skb)))
3074 goto out;
3075
3076 /* Before computing a checksum, we should make sure no frag could
3077 * be modified by an external entity : checksum could be wrong.
3078 */
3079 if (unlikely(skb_has_shared_frag(skb))) {
3080 ret = __skb_linearize(skb);
3081 if (ret)
3082 goto out;
3083 }
3084 start = skb_checksum_start_offset(skb);
3085 offset = start + offsetof(struct sctphdr, checksum);
3086 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3087 ret = -EINVAL;
3088 goto out;
3089 }
3090 if (skb_cloned(skb) &&
3091 !skb_clone_writable(skb, offset + sizeof(__le32))) {
3092 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3093 if (ret)
3094 goto out;
3095 }
3096 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3097 skb->len - start, ~(__u32)0,
3098 crc32c_csum_stub));
3099 *(__le32 *)(skb->data + offset) = crc32c_csum;
3100 skb->ip_summed = CHECKSUM_NONE;
dba00306 3101 skb->csum_not_inet = 0;
b72b5bf6
DC
3102out:
3103 return ret;
3104}
3105
53d6471c 3106__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 3107{
252e3346 3108 __be16 type = skb->protocol;
f6a78bfc 3109
19acc327
PS
3110 /* Tunnel gso handlers can set protocol to ethernet. */
3111 if (type == htons(ETH_P_TEB)) {
3112 struct ethhdr *eth;
3113
3114 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3115 return 0;
3116
1dfe82eb 3117 eth = (struct ethhdr *)skb->data;
19acc327
PS
3118 type = eth->h_proto;
3119 }
3120
d4bcef3f 3121 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
3122}
3123
3124/**
3125 * skb_mac_gso_segment - mac layer segmentation handler.
3126 * @skb: buffer to segment
3127 * @features: features for the output path (see dev->features)
3128 */
3129struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3130 netdev_features_t features)
3131{
3132 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3133 struct packet_offload *ptype;
53d6471c
VY
3134 int vlan_depth = skb->mac_len;
3135 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
3136
3137 if (unlikely(!type))
3138 return ERR_PTR(-EINVAL);
3139
53d6471c 3140 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
3141
3142 rcu_read_lock();
22061d80 3143 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 3144 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 3145 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
3146 break;
3147 }
3148 }
3149 rcu_read_unlock();
3150
98e399f8 3151 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 3152
f6a78bfc
HX
3153 return segs;
3154}
05e8ef4a
PS
3155EXPORT_SYMBOL(skb_mac_gso_segment);
3156
3157
 3158/* openvswitch calls this on the rx path, so we need a different check.
3159 */
3160static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3161{
3162 if (tx_path)
0c19f846
WB
3163 return skb->ip_summed != CHECKSUM_PARTIAL &&
3164 skb->ip_summed != CHECKSUM_UNNECESSARY;
6e7bc478
ED
3165
3166 return skb->ip_summed == CHECKSUM_NONE;
05e8ef4a
PS
3167}
3168
3169/**
3170 * __skb_gso_segment - Perform segmentation on skb.
3171 * @skb: buffer to segment
3172 * @features: features for the output path (see dev->features)
3173 * @tx_path: whether it is called in TX path
3174 *
3175 * This function segments the given skb and returns a list of segments.
3176 *
3177 * It may return NULL if the skb requires no segmentation. This is
3178 * only possible when GSO is used for verifying header integrity.
9207f9d4
KK
3179 *
3180 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
05e8ef4a
PS
3181 */
3182struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3183 netdev_features_t features, bool tx_path)
3184{
b2504a5d
ED
3185 struct sk_buff *segs;
3186
05e8ef4a
PS
3187 if (unlikely(skb_needs_check(skb, tx_path))) {
3188 int err;
3189
b2504a5d 3190 /* We're going to init ->check field in TCP or UDP header */
a40e0a66 3191 err = skb_cow_head(skb, 0);
3192 if (err < 0)
05e8ef4a
PS
3193 return ERR_PTR(err);
3194 }
3195
802ab55a
AD
3196 /* Only report GSO partial support if it will enable us to
3197 * support segmentation on this frame without needing additional
3198 * work.
3199 */
3200 if (features & NETIF_F_GSO_PARTIAL) {
3201 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3202 struct net_device *dev = skb->dev;
3203
3204 partial_features |= dev->features & dev->gso_partial_features;
3205 if (!skb_gso_ok(skb, features | partial_features))
3206 features &= ~NETIF_F_GSO_PARTIAL;
3207 }
3208
9207f9d4
KK
3209 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3210 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3211
68c33163 3212 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
3213 SKB_GSO_CB(skb)->encap_level = 0;
3214
05e8ef4a
PS
3215 skb_reset_mac_header(skb);
3216 skb_reset_mac_len(skb);
3217
b2504a5d
ED
3218 segs = skb_mac_gso_segment(skb, features);
3219
8d74e9f8 3220 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
b2504a5d
ED
3221 skb_warn_bad_offload(skb);
3222
3223 return segs;
05e8ef4a 3224}
12b0004d 3225EXPORT_SYMBOL(__skb_gso_segment);
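/*
 * Example: callers on the transmit side normally use the
 * skb_gso_segment() wrapper (tx_path == true). A hypothetical sketch
 * walking the returned list; my_xmit_one() is an assumed helper:
 *
 *	struct sk_buff *segs, *seg;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (!segs)
 *		goto xmit_single;
 *	consume_skb(skb);
 *	while (segs) {
 *		seg = segs;
 *		segs = segs->next;
 *		skb_mark_not_on_list(seg);
 *		my_xmit_one(seg);
 *	}
 */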
f6a78bfc 3226
fb286bb2
HX
3227/* Take action when hardware reception checksum errors are detected. */
3228#ifdef CONFIG_BUG
7fe50ac8 3229void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
fb286bb2
HX
3230{
3231 if (net_ratelimit()) {
7b6cd1ce 3232 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
6413139d 3233 skb_dump(KERN_ERR, skb, true);
fb286bb2
HX
3234 dump_stack();
3235 }
3236}
3237EXPORT_SYMBOL(netdev_rx_csum_fault);
3238#endif
3239
ab74cfeb 3240/* XXX: check that highmem exists at all on the given machine. */
c1e756bf 3241static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 3242{
3d3a8533 3243#ifdef CONFIG_HIGHMEM
1da177e4 3244 int i;
f4563a75 3245
5acbbd42 3246 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
3247 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3248 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
f4563a75 3249
ea2ab693 3250 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 3251 return 1;
ea2ab693 3252 }
5acbbd42 3253 }
3d3a8533 3254#endif
1da177e4
LT
3255 return 0;
3256}
1da177e4 3257
3b392ddb
SH
3258/* If MPLS offload request, verify we are testing hardware MPLS features
3259 * instead of standard features for the netdev.
3260 */
d0edc7bf 3261#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
3262static netdev_features_t net_mpls_features(struct sk_buff *skb,
3263 netdev_features_t features,
3264 __be16 type)
3265{
25cd9ba0 3266 if (eth_p_mpls(type))
3b392ddb
SH
3267 features &= skb->dev->mpls_features;
3268
3269 return features;
3270}
3271#else
3272static netdev_features_t net_mpls_features(struct sk_buff *skb,
3273 netdev_features_t features,
3274 __be16 type)
3275{
3276 return features;
3277}
3278#endif
3279
c8f44aff 3280static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 3281 netdev_features_t features)
f01a5236 3282{
53d6471c 3283 int tmp;
3b392ddb
SH
3284 __be16 type;
3285
3286 type = skb_network_protocol(skb, &tmp);
3287 features = net_mpls_features(skb, features, type);
53d6471c 3288
c0d680e5 3289 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 3290 !can_checksum_protocol(features, type)) {
996e8021 3291 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
f01a5236 3292 }
7be2c82c
ED
3293 if (illegal_highdma(skb->dev, skb))
3294 features &= ~NETIF_F_SG;
f01a5236
JG
3295
3296 return features;
3297}
3298
e38f3025
TM
3299netdev_features_t passthru_features_check(struct sk_buff *skb,
3300 struct net_device *dev,
3301 netdev_features_t features)
3302{
3303 return features;
3304}
3305EXPORT_SYMBOL(passthru_features_check);
3306
7ce23672 3307static netdev_features_t dflt_features_check(struct sk_buff *skb,
8cb65d00
TM
3308 struct net_device *dev,
3309 netdev_features_t features)
3310{
3311 return vlan_features_check(skb, features);
3312}
3313
cbc53e08
AD
3314static netdev_features_t gso_features_check(const struct sk_buff *skb,
3315 struct net_device *dev,
3316 netdev_features_t features)
3317{
3318 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3319
3320 if (gso_segs > dev->gso_max_segs)
3321 return features & ~NETIF_F_GSO_MASK;
3322
802ab55a
AD
3323 /* Support for GSO partial features requires software
3324 * intervention before we can actually process the packets
3325 * so we need to strip support for any partial features now
3326 * and we can pull them back in after we have partially
3327 * segmented the frame.
3328 */
3329 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3330 features &= ~dev->gso_partial_features;
3331
3332 /* Make sure to clear the IPv4 ID mangling feature if the
3333 * IPv4 header has the potential to be fragmented.
cbc53e08
AD
3334 */
3335 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3336 struct iphdr *iph = skb->encapsulation ?
3337 inner_ip_hdr(skb) : ip_hdr(skb);
3338
3339 if (!(iph->frag_off & htons(IP_DF)))
3340 features &= ~NETIF_F_TSO_MANGLEID;
3341 }
3342
3343 return features;
3344}
3345
c1e756bf 3346netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 3347{
5f35227e 3348 struct net_device *dev = skb->dev;
fcbeb976 3349 netdev_features_t features = dev->features;
58e998c6 3350
cbc53e08
AD
3351 if (skb_is_gso(skb))
3352 features = gso_features_check(skb, dev, features);
30b678d8 3353
5f35227e
JG
3354 /* If encapsulation offload request, verify we are testing
3355 * hardware encapsulation features instead of standard
3356 * features for the netdev
3357 */
3358 if (skb->encapsulation)
3359 features &= dev->hw_enc_features;
3360
f5a7fb88
TM
3361 if (skb_vlan_tagged(skb))
3362 features = netdev_intersect_features(features,
3363 dev->vlan_features |
3364 NETIF_F_HW_VLAN_CTAG_TX |
3365 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 3366
5f35227e
JG
3367 if (dev->netdev_ops->ndo_features_check)
3368 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3369 features);
8cb65d00
TM
3370 else
3371 features &= dflt_features_check(skb, dev, features);
5f35227e 3372
c1e756bf 3373 return harmonize_features(skb, features);
58e998c6 3374}
c1e756bf 3375EXPORT_SYMBOL(netif_skb_features);
58e998c6 3376
2ea25513 3377static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 3378 struct netdev_queue *txq, bool more)
f6a78bfc 3379{
2ea25513
DM
3380 unsigned int len;
3381 int rc;
00829823 3382
9f9a742d 3383 if (dev_nit_active(dev))
2ea25513 3384 dev_queue_xmit_nit(skb, dev);
fc741216 3385
2ea25513
DM
3386 len = skb->len;
3387 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 3388 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 3389 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 3390
2ea25513
DM
3391 return rc;
3392}
7b9c6090 3393
8dcda22a
DM
3394struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3395 struct netdev_queue *txq, int *ret)
7f2e870f
DM
3396{
3397 struct sk_buff *skb = first;
3398 int rc = NETDEV_TX_OK;
7b9c6090 3399
7f2e870f
DM
3400 while (skb) {
3401 struct sk_buff *next = skb->next;
fc70fb64 3402
a8305bff 3403 skb_mark_not_on_list(skb);
95f6b3dd 3404 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
3405 if (unlikely(!dev_xmit_complete(rc))) {
3406 skb->next = next;
3407 goto out;
3408 }
6afff0ca 3409
7f2e870f 3410 skb = next;
fe60faa5 3411 if (netif_tx_queue_stopped(txq) && skb) {
7f2e870f
DM
3412 rc = NETDEV_TX_BUSY;
3413 break;
9ccb8975 3414 }
7f2e870f 3415 }
9ccb8975 3416
7f2e870f
DM
3417out:
3418 *ret = rc;
3419 return skb;
3420}
b40863c6 3421
1ff0dc94
ED
3422static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3423 netdev_features_t features)
f6a78bfc 3424{
df8a39de 3425 if (skb_vlan_tag_present(skb) &&
5968250c
JP
3426 !vlan_hw_offload_capable(features, skb->vlan_proto))
3427 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
3428 return skb;
3429}
f6a78bfc 3430
43c26a1a
DC
3431int skb_csum_hwoffload_help(struct sk_buff *skb,
3432 const netdev_features_t features)
3433{
3434 if (unlikely(skb->csum_not_inet))
3435 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3436 skb_crc32c_csum_help(skb);
3437
3438 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3439}
3440EXPORT_SYMBOL(skb_csum_hwoffload_help);
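
/* Editor's illustrative sketch: how a driver whose hardware has no
 * checksum engine might use skb_csum_hwoffload_help() in its xmit
 * routine. example_xmit and the surrounding driver are hypothetical;
 * passing an empty feature mask forces any pending checksum to be
 * resolved in software before the frame reaches the hardware.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb_csum_hwoffload_help(skb, 0)) {
		/* Could not complete the checksum: drop the frame. */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* ... DMA-map the skb and hand it to the hardware queue ... */
	return NETDEV_TX_OK;
}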
3441
f53c7239 3442static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
eae3f88e
DM
3443{
3444 netdev_features_t features;
f6a78bfc 3445
eae3f88e
DM
3446 features = netif_skb_features(skb);
3447 skb = validate_xmit_vlan(skb, features);
3448 if (unlikely(!skb))
3449 goto out_null;
7b9c6090 3450
ebf4e808
IL
3451 skb = sk_validate_xmit_skb(skb, dev);
3452 if (unlikely(!skb))
3453 goto out_null;
3454
8b86a61d 3455 if (netif_needs_gso(skb, features)) {
ce93718f
DM
3456 struct sk_buff *segs;
3457
3458 segs = skb_gso_segment(skb, features);
cecda693 3459 if (IS_ERR(segs)) {
af6dabc9 3460 goto out_kfree_skb;
cecda693
JW
3461 } else if (segs) {
3462 consume_skb(skb);
3463 skb = segs;
f6a78bfc 3464 }
eae3f88e
DM
3465 } else {
3466 if (skb_needs_linearize(skb, features) &&
3467 __skb_linearize(skb))
3468 goto out_kfree_skb;
4ec93edb 3469
eae3f88e
DM
3470 /* If packet is not checksummed and device does not
3471 * support checksumming for this protocol, complete
3472 * checksumming here.
3473 */
3474 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3475 if (skb->encapsulation)
3476 skb_set_inner_transport_header(skb,
3477 skb_checksum_start_offset(skb));
3478 else
3479 skb_set_transport_header(skb,
3480 skb_checksum_start_offset(skb));
43c26a1a 3481 if (skb_csum_hwoffload_help(skb, features))
eae3f88e 3482 goto out_kfree_skb;
7b9c6090 3483 }
0c772159 3484 }
7b9c6090 3485
f53c7239 3486 skb = validate_xmit_xfrm(skb, features, again);
3dca3f38 3487
eae3f88e 3488 return skb;
fc70fb64 3489
f6a78bfc
HX
3490out_kfree_skb:
3491 kfree_skb(skb);
eae3f88e 3492out_null:
d21fd63e 3493 atomic_long_inc(&dev->tx_dropped);
eae3f88e
DM
3494 return NULL;
3495}
6afff0ca 3496
f53c7239 3497struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
55a93b3e
ED
3498{
3499 struct sk_buff *next, *head = NULL, *tail;
3500
bec3cfdc 3501 for (; skb != NULL; skb = next) {
55a93b3e 3502 next = skb->next;
a8305bff 3503 skb_mark_not_on_list(skb);
bec3cfdc
ED
3504
3505 /* in case skb won't be segmented, point to itself */
3506 skb->prev = skb;
3507
f53c7239 3508 skb = validate_xmit_skb(skb, dev, again);
bec3cfdc
ED
3509 if (!skb)
3510 continue;
55a93b3e 3511
bec3cfdc
ED
3512 if (!head)
3513 head = skb;
3514 else
3515 tail->next = skb;
3516 /* If skb was segmented, skb->prev points to
3517 * the last segment. If not, it still contains skb.
3518 */
3519 tail = skb->prev;
55a93b3e
ED
3520 }
3521 return head;
f6a78bfc 3522}
104ba78c 3523EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
f6a78bfc 3524
1def9238
ED
3525static void qdisc_pkt_len_init(struct sk_buff *skb)
3526{
3527 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3528
3529 qdisc_skb_cb(skb)->pkt_len = skb->len;
3530
3531 /* To get more precise estimation of bytes sent on wire,
3532 * we add to pkt_len the headers size of all segments
3533 */
a0dce875 3534 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
757b8b1d 3535 unsigned int hdr_len;
15e5a030 3536 u16 gso_segs = shinfo->gso_segs;
1def9238 3537
757b8b1d
ED
3538 /* mac layer + network layer */
3539 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3540
3541 /* + transport layer */
7c68d1a6
ED
3542 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3543 const struct tcphdr *th;
3544 struct tcphdr _tcphdr;
3545
3546 th = skb_header_pointer(skb, skb_transport_offset(skb),
3547 sizeof(_tcphdr), &_tcphdr);
3548 if (likely(th))
3549 hdr_len += __tcp_hdrlen(th);
3550 } else {
3551 struct udphdr _udphdr;
3552
3553 if (skb_header_pointer(skb, skb_transport_offset(skb),
3554 sizeof(_udphdr), &_udphdr))
3555 hdr_len += sizeof(struct udphdr);
3556 }
15e5a030
JW
3557
3558 if (shinfo->gso_type & SKB_GSO_DODGY)
3559 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3560 shinfo->gso_size);
3561
3562 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
3563 }
3564}
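
/* Editor's worked example for qdisc_pkt_len_init() (illustrative
 * numbers): a TSO skb with skb->len = 65226, gso_size = 1448 and
 * mac+ip+tcp headers of hdr_len = 66 carries 65160 payload bytes, so
 * gso_segs = 45 and pkt_len = 65226 + (45 - 1) * 66 = 68130 -- the
 * byte count that will actually appear on the wire once the device
 * segments the frame.
 */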
3565
bbd8a0d3
KK
3566static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3567 struct net_device *dev,
3568 struct netdev_queue *txq)
3569{
3570 spinlock_t *root_lock = qdisc_lock(q);
520ac30f 3571 struct sk_buff *to_free = NULL;
a2da570d 3572 bool contended;
bbd8a0d3
KK
3573 int rc;
3574
a2da570d 3575 qdisc_calculate_pkt_len(skb, q);
6b3ba914
JF
3576
3577 if (q->flags & TCQ_F_NOLOCK) {
d518d2ed
PA
3578 if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
3579 qdisc_run_begin(q)) {
3580 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
3581 &q->state))) {
3582 __qdisc_drop(skb, &to_free);
3583 rc = NET_XMIT_DROP;
3584 goto end_run;
3585 }
ba27b4cd
PA
3586 qdisc_bstats_cpu_update(q, skb);
3587
d518d2ed 3588 rc = NET_XMIT_SUCCESS;
ba27b4cd
PA
3589 if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
3590 __qdisc_run(q);
3591
d518d2ed 3592end_run:
ba27b4cd 3593 qdisc_run_end(q);
6b3ba914
JF
3594 } else {
3595 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
32f7b44d 3596 qdisc_run(q);
6b3ba914
JF
3597 }
3598
3599 if (unlikely(to_free))
3600 kfree_skb_list(to_free);
3601 return rc;
3602 }
3603
79640a4c
ED
3604 /*
3605 * Heuristic to force contended enqueues to serialize on a
3606 * separate lock before trying to get qdisc main lock.
f9eb8aea 3607 * This permits qdisc->running owner to get the lock more
9bf2b8c2 3608 * often and dequeue packets faster.
79640a4c 3609 */
a2da570d 3610 contended = qdisc_is_running(q);
79640a4c
ED
3611 if (unlikely(contended))
3612 spin_lock(&q->busylock);
3613
bbd8a0d3
KK
3614 spin_lock(root_lock);
3615 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
520ac30f 3616 __qdisc_drop(skb, &to_free);
bbd8a0d3
KK
3617 rc = NET_XMIT_DROP;
3618 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 3619 qdisc_run_begin(q)) {
bbd8a0d3
KK
3620 /*
3621 * This is a work-conserving queue; there are no old skbs
3622 * waiting to be sent out; and the qdisc is not running -
3623 * xmit the skb directly.
3624 */
bfe0d029 3625
bfe0d029
ED
3626 qdisc_bstats_update(q, skb);
3627
55a93b3e 3628 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
3629 if (unlikely(contended)) {
3630 spin_unlock(&q->busylock);
3631 contended = false;
3632 }
bbd8a0d3 3633 __qdisc_run(q);
6c148184 3634 }
bbd8a0d3 3635
6c148184 3636 qdisc_run_end(q);
bbd8a0d3
KK
3637 rc = NET_XMIT_SUCCESS;
3638 } else {
520ac30f 3639 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
79640a4c
ED
3640 if (qdisc_run_begin(q)) {
3641 if (unlikely(contended)) {
3642 spin_unlock(&q->busylock);
3643 contended = false;
3644 }
3645 __qdisc_run(q);
6c148184 3646 qdisc_run_end(q);
79640a4c 3647 }
bbd8a0d3
KK
3648 }
3649 spin_unlock(root_lock);
520ac30f
ED
3650 if (unlikely(to_free))
3651 kfree_skb_list(to_free);
79640a4c
ED
3652 if (unlikely(contended))
3653 spin_unlock(&q->busylock);
bbd8a0d3
KK
3654 return rc;
3655}
3656
86f8515f 3657#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
3658static void skb_update_prio(struct sk_buff *skb)
3659{
4dcb31d4
ED
3660 const struct netprio_map *map;
3661 const struct sock *sk;
3662 unsigned int prioidx;
5bc1421e 3663
4dcb31d4
ED
3664 if (skb->priority)
3665 return;
3666 map = rcu_dereference_bh(skb->dev->priomap);
3667 if (!map)
3668 return;
3669 sk = skb_to_full_sk(skb);
3670 if (!sk)
3671 return;
91c68ce2 3672
4dcb31d4
ED
3673 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3674
3675 if (prioidx < map->priomap_len)
3676 skb->priority = map->priomap[prioidx];
5bc1421e
NH
3677}
3678#else
3679#define skb_update_prio(skb)
3680#endif
3681
95603e22
MM
3682/**
3683 * dev_loopback_xmit - loop back @skb
3684 * @net: network namespace this loopback is happening in
3685 * @sk: the socket; needed so this function can be used as a netfilter okfn
3686 * @skb: buffer to transmit
3687 */
0c4b51f0 3688int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
3689{
3690 skb_reset_mac_header(skb);
3691 __skb_pull(skb, skb_network_offset(skb));
3692 skb->pkt_type = PACKET_LOOPBACK;
3693 skb->ip_summed = CHECKSUM_UNNECESSARY;
3694 WARN_ON(!skb_dst(skb));
3695 skb_dst_force(skb);
3696 netif_rx_ni(skb);
3697 return 0;
3698}
3699EXPORT_SYMBOL(dev_loopback_xmit);
3700
1f211a1b
DB
3701#ifdef CONFIG_NET_EGRESS
3702static struct sk_buff *
3703sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3704{
46209401 3705 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
1f211a1b
DB
3706 struct tcf_result cl_res;
3707
46209401 3708 if (!miniq)
1f211a1b
DB
3709 return skb;
3710
8dc07fdb 3711 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
46209401 3712 mini_qdisc_bstats_cpu_update(miniq, skb);
1f211a1b 3713
46209401 3714 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
1f211a1b
DB
3715 case TC_ACT_OK:
3716 case TC_ACT_RECLASSIFY:
3717 skb->tc_index = TC_H_MIN(cl_res.classid);
3718 break;
3719 case TC_ACT_SHOT:
46209401 3720 mini_qdisc_qstats_cpu_drop(miniq);
1f211a1b 3721 *ret = NET_XMIT_DROP;
7e2c3aea
DB
3722 kfree_skb(skb);
3723 return NULL;
1f211a1b
DB
3724 case TC_ACT_STOLEN:
3725 case TC_ACT_QUEUED:
e25ea21f 3726 case TC_ACT_TRAP:
1f211a1b 3727 *ret = NET_XMIT_SUCCESS;
7e2c3aea 3728 consume_skb(skb);
1f211a1b
DB
3729 return NULL;
3730 case TC_ACT_REDIRECT:
3731 /* No need to push/pop skb's mac_header here on egress! */
3732 skb_do_redirect(skb);
3733 *ret = NET_XMIT_SUCCESS;
3734 return NULL;
3735 default:
3736 break;
3737 }
3738
3739 return skb;
3740}
3741#endif /* CONFIG_NET_EGRESS */
3742
fc9bab24
AN
3743#ifdef CONFIG_XPS
3744static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3745 struct xps_dev_maps *dev_maps, unsigned int tci)
3746{
3747 struct xps_map *map;
3748 int queue_index = -1;
3749
3750 if (dev->num_tc) {
3751 tci *= dev->num_tc;
3752 tci += netdev_get_prio_tc_map(dev, skb->priority);
3753 }
3754
3755 map = rcu_dereference(dev_maps->attr_map[tci]);
3756 if (map) {
3757 if (map->len == 1)
3758 queue_index = map->queues[0];
3759 else
3760 queue_index = map->queues[reciprocal_scale(
3761 skb_get_hash(skb), map->len)];
3762 if (unlikely(queue_index >= dev->real_num_tx_queues))
3763 queue_index = -1;
3764 }
3765 return queue_index;
3766}
3767#endif
3768
eadec877
AD
3769static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3770 struct sk_buff *skb)
638b2a69
JP
3771{
3772#ifdef CONFIG_XPS
3773 struct xps_dev_maps *dev_maps;
fc9bab24 3774 struct sock *sk = skb->sk;
638b2a69
JP
3775 int queue_index = -1;
3776
04157469
AN
3777 if (!static_key_false(&xps_needed))
3778 return -1;
3779
638b2a69 3780 rcu_read_lock();
fc9bab24
AN
3781 if (!static_key_false(&xps_rxqs_needed))
3782 goto get_cpus_map;
3783
eadec877 3784 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
638b2a69 3785 if (dev_maps) {
fc9bab24 3786 int tci = sk_rx_queue_get(sk);
184c449f 3787
fc9bab24
AN
3788 if (tci >= 0 && tci < dev->num_rx_queues)
3789 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3790 tci);
3791 }
184c449f 3792
fc9bab24
AN
3793get_cpus_map:
3794 if (queue_index < 0) {
eadec877 3795 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
fc9bab24
AN
3796 if (dev_maps) {
3797 unsigned int tci = skb->sender_cpu - 1;
3798
3799 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3800 tci);
638b2a69
JP
3801 }
3802 }
3803 rcu_read_unlock();
3804
3805 return queue_index;
3806#else
3807 return -1;
3808#endif
3809}
3810
a4ea8a3d 3811u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
a350ecce 3812 struct net_device *sb_dev)
a4ea8a3d
AD
3813{
3814 return 0;
3815}
3816EXPORT_SYMBOL(dev_pick_tx_zero);
3817
3818u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
a350ecce 3819 struct net_device *sb_dev)
a4ea8a3d
AD
3820{
3821 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3822}
3823EXPORT_SYMBOL(dev_pick_tx_cpu_id);
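
/* Editor's illustrative sketch: a driver that wants a fixed
 * queue-selection policy can plug one of the helpers above straight
 * into its netdev_ops. The ops struct is made up, and example_xmit is
 * the hypothetical xmit routine sketched after skb_csum_hwoffload_help()
 * earlier; dev_pick_tx_cpu_id gives one TX queue per CPU, while
 * dev_pick_tx_zero pins everything to queue 0.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev);

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit   = example_xmit,
	.ndo_select_queue = dev_pick_tx_cpu_id,
};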
3824
b71b5837
PA
3825u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3826 struct net_device *sb_dev)
638b2a69
JP
3827{
3828 struct sock *sk = skb->sk;
3829 int queue_index = sk_tx_queue_get(sk);
3830
eadec877
AD
3831 sb_dev = sb_dev ? : dev;
3832
638b2a69
JP
3833 if (queue_index < 0 || skb->ooo_okay ||
3834 queue_index >= dev->real_num_tx_queues) {
eadec877 3835 int new_index = get_xps_queue(dev, sb_dev, skb);
f4563a75 3836
638b2a69 3837 if (new_index < 0)
eadec877 3838 new_index = skb_tx_hash(dev, sb_dev, skb);
638b2a69
JP
3839
3840 if (queue_index != new_index && sk &&
004a5d01 3841 sk_fullsock(sk) &&
638b2a69
JP
3842 rcu_access_pointer(sk->sk_dst_cache))
3843 sk_tx_queue_set(sk, new_index);
3844
3845 queue_index = new_index;
3846 }
3847
3848 return queue_index;
3849}
b71b5837 3850EXPORT_SYMBOL(netdev_pick_tx);
638b2a69 3851
4bd97d51
PA
3852struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
3853 struct sk_buff *skb,
3854 struct net_device *sb_dev)
638b2a69
JP
3855{
3856 int queue_index = 0;
3857
3858#ifdef CONFIG_XPS
52bd2d62
ED
3859 u32 sender_cpu = skb->sender_cpu - 1;
3860
3861 if (sender_cpu >= (u32)NR_CPUS)
638b2a69
JP
3862 skb->sender_cpu = raw_smp_processor_id() + 1;
3863#endif
3864
3865 if (dev->real_num_tx_queues != 1) {
3866 const struct net_device_ops *ops = dev->netdev_ops;
f4563a75 3867
638b2a69 3868 if (ops->ndo_select_queue)
a350ecce 3869 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
638b2a69 3870 else
4bd97d51 3871 queue_index = netdev_pick_tx(dev, skb, sb_dev);
638b2a69 3872
d584527c 3873 queue_index = netdev_cap_txqueue(dev, queue_index);
638b2a69
JP
3874 }
3875
3876 skb_set_queue_mapping(skb, queue_index);
3877 return netdev_get_tx_queue(dev, queue_index);
3878}
3879
d29f749e 3880/**
9d08dd3d 3881 * __dev_queue_xmit - transmit a buffer
d29f749e 3882 * @skb: buffer to transmit
eadec877 3883 * @sb_dev: subordinate device used for L2 forwarding offload
3884 *
3885 * Queue a buffer for transmission to a network device. The caller must
3886 * have set the device and priority and built the buffer before calling
3887 * this function. The function can be called from an interrupt.
3888 *
3889 * A negative errno code is returned on a failure. A success does not
3890 * guarantee the frame will be transmitted as it may be dropped due
3891 * to congestion or traffic shaping.
3892 *
3893 * -----------------------------------------------------------------------------------
3894 * I notice this method can also return errors from the queue disciplines,
3895 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3896 * be positive.
3897 *
3898 * Regardless of the return value, the skb is consumed, so it is currently
3899 * difficult to retry a send to this method. (You can bump the ref count
3900 * before sending to hold a reference for retry if you are careful.)
3901 *
3902 * When calling this method, interrupts MUST be enabled. This is because
3903 * the BH enable code must have IRQs enabled so that it will not deadlock.
3904 * --BLG
3905 */
eadec877 3906static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
1da177e4
LT
3907{
3908 struct net_device *dev = skb->dev;
dc2b4847 3909 struct netdev_queue *txq;
1da177e4
LT
3910 struct Qdisc *q;
3911 int rc = -ENOMEM;
f53c7239 3912 bool again = false;
1da177e4 3913
6d1ccff6
ED
3914 skb_reset_mac_header(skb);
3915
e7fd2885
WB
3916 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3917 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3918
4ec93edb
YH
3919 /* Disable soft irqs for various locks below. Also
3920 * stops preemption for RCU.
1da177e4 3921 */
4ec93edb 3922 rcu_read_lock_bh();
1da177e4 3923
5bc1421e
NH
3924 skb_update_prio(skb);
3925
1f211a1b
DB
3926 qdisc_pkt_len_init(skb);
3927#ifdef CONFIG_NET_CLS_ACT
8dc07fdb 3928 skb->tc_at_ingress = 0;
1f211a1b 3929# ifdef CONFIG_NET_EGRESS
aabf6772 3930 if (static_branch_unlikely(&egress_needed_key)) {
1f211a1b
DB
3931 skb = sch_handle_egress(skb, &rc, dev);
3932 if (!skb)
3933 goto out;
3934 }
3935# endif
3936#endif
02875878
ED
3937 /* If device/qdisc don't need skb->dst, release it right now while
3938 * it's hot in this cpu cache.
3939 */
3940 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3941 skb_dst_drop(skb);
3942 else
3943 skb_dst_force(skb);
3944
4bd97d51 3945 txq = netdev_core_pick_tx(dev, skb, sb_dev);
a898def2 3946 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3947
cf66ba58 3948 trace_net_dev_queue(skb);
1da177e4 3949 if (q->enqueue) {
bbd8a0d3 3950 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3951 goto out;
1da177e4
LT
3952 }
3953
3954 /* The device has no queue. Common case for software devices:
eb13da1a 3955 * loopback, all the sorts of tunnels...
1da177e4 3956 *
eb13da1a 3957 * Really, it is unlikely that netif_tx_lock protection is necessary
3958 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
3959 * counters.)
3960 * However, it is possible that they rely on the protection
3961 * made by us here.
1da177e4 3962 *
eb13da1a 3963 * Check this and shoot the lock. It is not prone to deadlocks.
3964 * Either shoot the noqueue qdisc, it is even simpler 8)
3965 */
3966 if (dev->flags & IFF_UP) {
3967 int cpu = smp_processor_id(); /* ok because BHs are off */
3968
c773e847 3969 if (txq->xmit_lock_owner != cpu) {
97cdcf37 3970 if (dev_xmit_recursion())
745e20f1
ED
3971 goto recursion_alert;
3972
f53c7239 3973 skb = validate_xmit_skb(skb, dev, &again);
1f59533f 3974 if (!skb)
d21fd63e 3975 goto out;
1f59533f 3976
c773e847 3977 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3978
73466498 3979 if (!netif_xmit_stopped(txq)) {
97cdcf37 3980 dev_xmit_recursion_inc();
ce93718f 3981 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
97cdcf37 3982 dev_xmit_recursion_dec();
572a9d7b 3983 if (dev_xmit_complete(rc)) {
c773e847 3984 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3985 goto out;
3986 }
3987 }
c773e847 3988 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3989 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3990 dev->name);
1da177e4
LT
3991 } else {
3992 /* Recursion is detected! It is possible,
745e20f1
ED
3993 * unfortunately
3994 */
3995recursion_alert:
e87cc472
JP
3996 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3997 dev->name);
1da177e4
LT
3998 }
3999 }
4000
4001 rc = -ENETDOWN;
d4828d85 4002 rcu_read_unlock_bh();
1da177e4 4003
015f0688 4004 atomic_long_inc(&dev->tx_dropped);
1f59533f 4005 kfree_skb_list(skb);
1da177e4
LT
4006 return rc;
4007out:
d4828d85 4008 rcu_read_unlock_bh();
1da177e4
LT
4009 return rc;
4010}
f663dd9a 4011
2b4aa3ce 4012int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
4013{
4014 return __dev_queue_xmit(skb, NULL);
4015}
2b4aa3ce 4016EXPORT_SYMBOL(dev_queue_xmit);
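
/* Editor's illustrative sketch: a minimal sender built on
 * dev_queue_xmit(). All names are hypothetical, and the buffer is
 * assumed to already hold a complete frame for @dev carrying IPv4.
 * Note that dev_queue_xmit() consumes the skb on success and failure
 * alike, so the caller must not touch it afterwards.
 */
static int example_send_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, frame, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* assumed: IPv4 payload */
	return dev_queue_xmit(skb);
}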
1da177e4 4017
eadec877 4018int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
f663dd9a 4019{
eadec877 4020 return __dev_queue_xmit(skb, sb_dev);
f663dd9a
JW
4021}
4022EXPORT_SYMBOL(dev_queue_xmit_accel);
4023
865b03f2
MK
4024int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4025{
4026 struct net_device *dev = skb->dev;
4027 struct sk_buff *orig_skb = skb;
4028 struct netdev_queue *txq;
4029 int ret = NETDEV_TX_BUSY;
4030 bool again = false;
4031
4032 if (unlikely(!netif_running(dev) ||
4033 !netif_carrier_ok(dev)))
4034 goto drop;
4035
4036 skb = validate_xmit_skb_list(skb, dev, &again);
4037 if (skb != orig_skb)
4038 goto drop;
4039
4040 skb_set_queue_mapping(skb, queue_id);
4041 txq = skb_get_tx_queue(dev, skb);
4042
4043 local_bh_disable();
4044
4045 HARD_TX_LOCK(dev, txq, smp_processor_id());
4046 if (!netif_xmit_frozen_or_drv_stopped(txq))
4047 ret = netdev_start_xmit(skb, dev, txq, false);
4048 HARD_TX_UNLOCK(dev, txq);
4049
4050 local_bh_enable();
4051
4052 if (!dev_xmit_complete(ret))
4053 kfree_skb(skb);
4054
4055 return ret;
4056drop:
4057 atomic_long_inc(&dev->tx_dropped);
4058 kfree_skb_list(skb);
4059 return NET_XMIT_DROP;
4060}
4061EXPORT_SYMBOL(dev_direct_xmit);
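
/* Editor's note with an illustrative snippet: dev_direct_xmit() skips
 * the qdisc layer entirely and drives one specific TX queue; AF_XDP's
 * copy-mode transmit is the typical in-tree caller. A user that has
 * already built and validated a frame might simply do (queue_id
 * assumed valid for the device):
 *
 *	err = dev_direct_xmit(skb, queue_id);
 *
 * As with dev_queue_xmit(), the skb is consumed whatever the outcome.
 */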
1da177e4 4062
eb13da1a 4063/*************************************************************************
4064 * Receiver routines
4065 *************************************************************************/
1da177e4 4066
6b2bedc3 4067int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
4068EXPORT_SYMBOL(netdev_max_backlog);
4069
3b098e2d 4070int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3 4071int netdev_budget __read_mostly = 300;
7acf8a1e 4072unsigned int __read_mostly netdev_budget_usecs = 2000;
3d48b53f
MT
4073int weight_p __read_mostly = 64; /* old backlog weight */
4074int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4075int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4076int dev_rx_weight __read_mostly = 64;
4077int dev_tx_weight __read_mostly = 64;
323ebb61
EC
4078/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4079int gro_normal_batch __read_mostly = 8;
1da177e4 4080
eecfd7c4
ED
4081/* Called with irq disabled */
4082static inline void ____napi_schedule(struct softnet_data *sd,
4083 struct napi_struct *napi)
4084{
4085 list_add_tail(&napi->poll_list, &sd->poll_list);
4086 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4087}
4088
bfb564e7
KK
4089#ifdef CONFIG_RPS
4090
4091/* One global table that all flow-based protocols share. */
6e3f7faf 4092struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 4093EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
4094u32 rps_cpu_mask __read_mostly;
4095EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 4096
dc05360f 4097struct static_key_false rps_needed __read_mostly;
3df97ba8 4098EXPORT_SYMBOL(rps_needed);
dc05360f 4099struct static_key_false rfs_needed __read_mostly;
13bfff25 4100EXPORT_SYMBOL(rfs_needed);
adc9300e 4101
c445477d
BH
4102static struct rps_dev_flow *
4103set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4104 struct rps_dev_flow *rflow, u16 next_cpu)
4105{
a31196b0 4106 if (next_cpu < nr_cpu_ids) {
c445477d
BH
4107#ifdef CONFIG_RFS_ACCEL
4108 struct netdev_rx_queue *rxqueue;
4109 struct rps_dev_flow_table *flow_table;
4110 struct rps_dev_flow *old_rflow;
4111 u32 flow_id;
4112 u16 rxq_index;
4113 int rc;
4114
4115 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
4116 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4117 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
4118 goto out;
4119 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4120 if (rxq_index == skb_get_rx_queue(skb))
4121 goto out;
4122
4123 rxqueue = dev->_rx + rxq_index;
4124 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4125 if (!flow_table)
4126 goto out;
61b905da 4127 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
4128 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4129 rxq_index, flow_id);
4130 if (rc < 0)
4131 goto out;
4132 old_rflow = rflow;
4133 rflow = &flow_table->flows[flow_id];
c445477d
BH
4134 rflow->filter = rc;
4135 if (old_rflow->filter == rflow->filter)
4136 old_rflow->filter = RPS_NO_FILTER;
4137 out:
4138#endif
4139 rflow->last_qtail =
09994d1b 4140 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
4141 }
4142
09994d1b 4143 rflow->cpu = next_cpu;
c445477d
BH
4144 return rflow;
4145}
4146
bfb564e7
KK
4147/*
4148 * get_rps_cpu is called from netif_receive_skb and returns the target
4149 * CPU from the RPS map of the receiving queue for a given skb.
4150 * rcu_read_lock must be held on entry.
4151 */
4152static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4153 struct rps_dev_flow **rflowp)
4154{
567e4b79
ED
4155 const struct rps_sock_flow_table *sock_flow_table;
4156 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 4157 struct rps_dev_flow_table *flow_table;
567e4b79 4158 struct rps_map *map;
bfb564e7 4159 int cpu = -1;
567e4b79 4160 u32 tcpu;
61b905da 4161 u32 hash;
bfb564e7
KK
4162
4163 if (skb_rx_queue_recorded(skb)) {
4164 u16 index = skb_get_rx_queue(skb);
567e4b79 4165
62fe0b40
BH
4166 if (unlikely(index >= dev->real_num_rx_queues)) {
4167 WARN_ONCE(dev->real_num_rx_queues > 1,
4168 "%s received packet on queue %u, but number "
4169 "of RX queues is %u\n",
4170 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
4171 goto done;
4172 }
567e4b79
ED
4173 rxqueue += index;
4174 }
bfb564e7 4175
567e4b79
ED
4176 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4177
4178 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 4179 map = rcu_dereference(rxqueue->rps_map);
567e4b79 4180 if (!flow_table && !map)
bfb564e7
KK
4181 goto done;
4182
2d47b459 4183 skb_reset_network_header(skb);
61b905da
TH
4184 hash = skb_get_hash(skb);
4185 if (!hash)
bfb564e7
KK
4186 goto done;
4187
fec5e652
TH
4188 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4189 if (flow_table && sock_flow_table) {
fec5e652 4190 struct rps_dev_flow *rflow;
567e4b79
ED
4191 u32 next_cpu;
4192 u32 ident;
4193
4194 /* First check the global flow table for a match */
4195 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4196 if ((ident ^ hash) & ~rps_cpu_mask)
4197 goto try_rps;
fec5e652 4198
567e4b79
ED
4199 next_cpu = ident & rps_cpu_mask;
4200
4201 /* OK, now we know there is a match,
4202 * we can look at the local (per receive queue) flow table
4203 */
61b905da 4204 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
4205 tcpu = rflow->cpu;
4206
fec5e652
TH
4207 /*
4208 * If the desired CPU (where last recvmsg was done) is
4209 * different from current CPU (one in the rx-queue flow
4210 * table entry), switch if one of the following holds:
a31196b0 4211 * - Current CPU is unset (>= nr_cpu_ids).
4212 * - Current CPU is offline.
4213 * - The current CPU's queue tail has advanced beyond the
4214 * last packet that was enqueued using this table entry.
4215 * This guarantees that all previous packets for the flow
4216 * have been dequeued, thus preserving in order delivery.
4217 */
4218 if (unlikely(tcpu != next_cpu) &&
a31196b0 4219 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 4220 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
4221 rflow->last_qtail)) >= 0)) {
4222 tcpu = next_cpu;
c445477d 4223 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 4224 }
c445477d 4225
a31196b0 4226 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
4227 *rflowp = rflow;
4228 cpu = tcpu;
4229 goto done;
4230 }
4231 }
4232
567e4b79
ED
4233try_rps:
4234
0a9627f2 4235 if (map) {
8fc54f68 4236 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
4237 if (cpu_online(tcpu)) {
4238 cpu = tcpu;
4239 goto done;
4240 }
4241 }
4242
4243done:
0a9627f2
TH
4244 return cpu;
4245}
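
/* Editor's worked example for the rps_sock_flow_table lookup above
 * (illustrative values): each entry stores the flow hash with its low
 * bits replaced by the CPU that last processed the flow. With
 * rps_cpu_mask = 0xff, an entry of 0x12345607 means "the flow whose
 * hash has high bits 0x123456xx last ran on CPU 7", so
 * ((ident ^ hash) & ~rps_cpu_mask) == 0 is a cheap same-flow check and
 * (ident & rps_cpu_mask) recovers the desired CPU.
 */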
4246
c445477d
BH
4247#ifdef CONFIG_RFS_ACCEL
4248
4249/**
4250 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4251 * @dev: Device on which the filter was set
4252 * @rxq_index: RX queue index
4253 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4254 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4255 *
4256 * Drivers that implement ndo_rx_flow_steer() should periodically call
4257 * this function for each installed filter and remove the filters for
4258 * which it returns %true.
4259 */
4260bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4261 u32 flow_id, u16 filter_id)
4262{
4263 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4264 struct rps_dev_flow_table *flow_table;
4265 struct rps_dev_flow *rflow;
4266 bool expire = true;
a31196b0 4267 unsigned int cpu;
c445477d
BH
4268
4269 rcu_read_lock();
4270 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4271 if (flow_table && flow_id <= flow_table->mask) {
4272 rflow = &flow_table->flows[flow_id];
6aa7de05 4273 cpu = READ_ONCE(rflow->cpu);
a31196b0 4274 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
4275 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4276 rflow->last_qtail) <
4277 (int)(10 * flow_table->mask)))
4278 expire = false;
4279 }
4280 rcu_read_unlock();
4281 return expire;
4282}
4283EXPORT_SYMBOL(rps_may_expire_flow);
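
/* Editor's illustrative sketch: a driver implementing
 * ndo_rx_flow_steer() is expected to age out its hardware filters by
 * polling rps_may_expire_flow(), e.g. from a periodic workqueue.
 * Everything here except that call is hypothetical.
 */
struct example_filter {
	bool	installed;
	u16	rxq_index;
	u16	filter_id;
	u32	flow_id;
};

static void example_expire_filters(struct net_device *netdev,
				   struct example_filter *filters,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (filters[i].installed &&
		    rps_may_expire_flow(netdev, filters[i].rxq_index,
					filters[i].flow_id,
					filters[i].filter_id)) {
			/* remove the stale filter from hardware here */
			filters[i].installed = false;
		}
	}
}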
4284
4285#endif /* CONFIG_RFS_ACCEL */
4286
0a9627f2 4287/* Called from hardirq (IPI) context */
e36fa2f7 4288static void rps_trigger_softirq(void *data)
0a9627f2 4289{
e36fa2f7
ED
4290 struct softnet_data *sd = data;
4291
eecfd7c4 4292 ____napi_schedule(sd, &sd->backlog);
dee42870 4293 sd->received_rps++;
0a9627f2 4294}
e36fa2f7 4295
fec5e652 4296#endif /* CONFIG_RPS */
0a9627f2 4297
e36fa2f7
ED
4298/*
4299 * Check if this softnet_data structure belongs to another CPU
4300 * If yes, queue it to our IPI list and return 1
4301 * If no, return 0
4302 */
4303static int rps_ipi_queued(struct softnet_data *sd)
4304{
4305#ifdef CONFIG_RPS
903ceff7 4306 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
4307
4308 if (sd != mysd) {
4309 sd->rps_ipi_next = mysd->rps_ipi_list;
4310 mysd->rps_ipi_list = sd;
4311
4312 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4313 return 1;
4314 }
4315#endif /* CONFIG_RPS */
4316 return 0;
4317}
4318
99bbc707
WB
4319#ifdef CONFIG_NET_FLOW_LIMIT
4320int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4321#endif
4322
4323static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4324{
4325#ifdef CONFIG_NET_FLOW_LIMIT
4326 struct sd_flow_limit *fl;
4327 struct softnet_data *sd;
4328 unsigned int old_flow, new_flow;
4329
4330 if (qlen < (netdev_max_backlog >> 1))
4331 return false;
4332
903ceff7 4333 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
4334
4335 rcu_read_lock();
4336 fl = rcu_dereference(sd->flow_limit);
4337 if (fl) {
3958afa1 4338 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
4339 old_flow = fl->history[fl->history_head];
4340 fl->history[fl->history_head] = new_flow;
4341
4342 fl->history_head++;
4343 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4344
4345 if (likely(fl->buckets[old_flow]))
4346 fl->buckets[old_flow]--;
4347
4348 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4349 fl->count++;
4350 rcu_read_unlock();
4351 return true;
4352 }
4353 }
4354 rcu_read_unlock();
4355#endif
4356 return false;
4357}
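
/* Editor's worked example for the flow-limit heuristic above: the
 * limiter only engages once this CPU's backlog reaches half of
 * netdev_max_backlog (500 packets with the default of 1000). From then
 * on, any single flow (bucketed by skb_get_hash()) that owns more than
 * half of the FLOW_LIMIT_HISTORY most recently enqueued packets is
 * dropped, so one heavy flow cannot starve the remaining queue space.
 */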
4358
0a9627f2
TH
4359/*
4360 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4361 * queue (may be a remote CPU queue).
4362 */
fec5e652
TH
4363static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4364 unsigned int *qtail)
0a9627f2 4365{
e36fa2f7 4366 struct softnet_data *sd;
0a9627f2 4367 unsigned long flags;
99bbc707 4368 unsigned int qlen;
0a9627f2 4369
e36fa2f7 4370 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
4371
4372 local_irq_save(flags);
0a9627f2 4373
e36fa2f7 4374 rps_lock(sd);
e9e4dd32
JA
4375 if (!netif_running(skb->dev))
4376 goto drop;
99bbc707
WB
4377 qlen = skb_queue_len(&sd->input_pkt_queue);
4378 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 4379 if (qlen) {
0a9627f2 4380enqueue:
e36fa2f7 4381 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 4382 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 4383 rps_unlock(sd);
152102c7 4384 local_irq_restore(flags);
0a9627f2
TH
4385 return NET_RX_SUCCESS;
4386 }
4387
ebda37c2
ED
4388 /* Schedule NAPI for backlog device
4389 * We can use a non-atomic operation since we own the queue lock
4390 */
4391 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 4392 if (!rps_ipi_queued(sd))
eecfd7c4 4393 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
4394 }
4395 goto enqueue;
4396 }
4397
e9e4dd32 4398drop:
dee42870 4399 sd->dropped++;
e36fa2f7 4400 rps_unlock(sd);
0a9627f2 4401
0a9627f2
TH
4402 local_irq_restore(flags);
4403
caf586e5 4404 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
4405 kfree_skb(skb);
4406 return NET_RX_DROP;
4407}
1da177e4 4408
e817f856
JDB
4409static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4410{
4411 struct net_device *dev = skb->dev;
4412 struct netdev_rx_queue *rxqueue;
4413
4414 rxqueue = dev->_rx;
4415
4416 if (skb_rx_queue_recorded(skb)) {
4417 u16 index = skb_get_rx_queue(skb);
4418
4419 if (unlikely(index >= dev->real_num_rx_queues)) {
4420 WARN_ONCE(dev->real_num_rx_queues > 1,
4421 "%s received packet on queue %u, but number "
4422 "of RX queues is %u\n",
4423 dev->name, index, dev->real_num_rx_queues);
4424
4425 return rxqueue; /* Return first rxqueue */
4426 }
4427 rxqueue += index;
4428 }
4429 return rxqueue;
4430}
4431
d4455169 4432static u32 netif_receive_generic_xdp(struct sk_buff *skb,
02671e23 4433 struct xdp_buff *xdp,
4434 struct bpf_prog *xdp_prog)
4435{
e817f856 4436 struct netdev_rx_queue *rxqueue;
198d83bb 4437 void *orig_data, *orig_data_end;
de8f3a83 4438 u32 metalen, act = XDP_DROP;
29724956
JDB
4439 __be16 orig_eth_type;
4440 struct ethhdr *eth;
4441 bool orig_bcast;
d4455169
JF
4442 int hlen, off;
4443 u32 mac_len;
4444
4445 /* Reinjected packets coming from act_mirred or similar should
4446 * not get XDP generic processing.
4447 */
cd11b164 4448 if (skb_cloned(skb) || skb_is_tc_redirected(skb))
d4455169
JF
4449 return XDP_PASS;
4450
de8f3a83
DB
4451 /* XDP packets must be linear and must have sufficient headroom
4452 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4453 * native XDP provides, thus we need to do it here as well.
4454 */
4455 if (skb_is_nonlinear(skb) ||
4456 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4457 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4458 int troom = skb->tail + skb->data_len - skb->end;
4459
4460 /* In case we have to go down the path and also linearize,
4461 * then let's do the pskb_expand_head() work just once here.
4462 */
4463 if (pskb_expand_head(skb,
4464 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4465 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4466 goto do_drop;
2d17d8d7 4467 if (skb_linearize(skb))
de8f3a83
DB
4468 goto do_drop;
4469 }
d4455169
JF
4470
4471 /* The XDP program wants to see the packet starting at the MAC
4472 * header.
4473 */
4474 mac_len = skb->data - skb_mac_header(skb);
4475 hlen = skb_headlen(skb) + mac_len;
02671e23
BT
4476 xdp->data = skb->data - mac_len;
4477 xdp->data_meta = xdp->data;
4478 xdp->data_end = xdp->data + hlen;
4479 xdp->data_hard_start = skb->data - skb_headroom(skb);
4480 orig_data_end = xdp->data_end;
4481 orig_data = xdp->data;
29724956
JDB
4482 eth = (struct ethhdr *)xdp->data;
4483 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4484 orig_eth_type = eth->h_proto;
d4455169 4485
e817f856 4486 rxqueue = netif_get_rxqueue(skb);
02671e23 4487 xdp->rxq = &rxqueue->xdp_rxq;
e817f856 4488
02671e23 4489 act = bpf_prog_run_xdp(xdp_prog, xdp);
d4455169 4490
065af355 4491 /* check if bpf_xdp_adjust_head was used */
02671e23 4492 off = xdp->data - orig_data;
065af355
JDB
4493 if (off) {
4494 if (off > 0)
4495 __skb_pull(skb, off);
4496 else if (off < 0)
4497 __skb_push(skb, -off);
4498
4499 skb->mac_header += off;
4500 skb_reset_network_header(skb);
4501 }
d4455169 4502
198d83bb
NS
4503 /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
4504 * the packet.
4505 */
02671e23 4506 off = orig_data_end - xdp->data_end;
f7613120 4507 if (off != 0) {
02671e23 4508 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
f7613120 4509 skb->len -= off;
02671e23 4510
f7613120 4511 }
198d83bb 4512
29724956
JDB
4513 /* check if XDP changed eth hdr such that SKB needs update */
4514 eth = (struct ethhdr *)xdp->data;
4515 if ((orig_eth_type != eth->h_proto) ||
4516 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4517 __skb_push(skb, ETH_HLEN);
4518 skb->protocol = eth_type_trans(skb, skb->dev);
4519 }
4520
d4455169 4521 switch (act) {
6103aa96 4522 case XDP_REDIRECT:
d4455169
JF
4523 case XDP_TX:
4524 __skb_push(skb, mac_len);
de8f3a83 4525 break;
d4455169 4526 case XDP_PASS:
02671e23 4527 metalen = xdp->data - xdp->data_meta;
de8f3a83
DB
4528 if (metalen)
4529 skb_metadata_set(skb, metalen);
d4455169 4530 break;
d4455169
JF
4531 default:
4532 bpf_warn_invalid_xdp_action(act);
4533 /* fall through */
4534 case XDP_ABORTED:
4535 trace_xdp_exception(skb->dev, xdp_prog, act);
4536 /* fall through */
4537 case XDP_DROP:
4538 do_drop:
4539 kfree_skb(skb);
4540 break;
4541 }
4542
4543 return act;
4544}
4545
4546/* When doing generic XDP we have to bypass the qdisc layer and the
4547 * network taps in order to match in-driver-XDP behavior.
4548 */
7c497478 4549void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
d4455169
JF
4550{
4551 struct net_device *dev = skb->dev;
4552 struct netdev_queue *txq;
4553 bool free_skb = true;
4554 int cpu, rc;
4555
4bd97d51 4556 txq = netdev_core_pick_tx(dev, skb, NULL);
d4455169
JF
4557 cpu = smp_processor_id();
4558 HARD_TX_LOCK(dev, txq, cpu);
4559 if (!netif_xmit_stopped(txq)) {
4560 rc = netdev_start_xmit(skb, dev, txq, 0);
4561 if (dev_xmit_complete(rc))
4562 free_skb = false;
4563 }
4564 HARD_TX_UNLOCK(dev, txq);
4565 if (free_skb) {
4566 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4567 kfree_skb(skb);
4568 }
4569}
7c497478 4570EXPORT_SYMBOL_GPL(generic_xdp_tx);
d4455169 4571
02786475 4572static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
d4455169 4573
7c497478 4574int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
d4455169 4575{
d4455169 4576 if (xdp_prog) {
02671e23
BT
4577 struct xdp_buff xdp;
4578 u32 act;
6103aa96 4579 int err;
d4455169 4580
02671e23 4581 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
d4455169 4582 if (act != XDP_PASS) {
6103aa96
JF
4583 switch (act) {
4584 case XDP_REDIRECT:
2facaad6 4585 err = xdp_do_generic_redirect(skb->dev, skb,
02671e23 4586 &xdp, xdp_prog);
6103aa96
JF
4587 if (err)
4588 goto out_redir;
02671e23 4589 break;
6103aa96 4590 case XDP_TX:
d4455169 4591 generic_xdp_tx(skb, xdp_prog);
6103aa96
JF
4592 break;
4593 }
d4455169
JF
4594 return XDP_DROP;
4595 }
4596 }
4597 return XDP_PASS;
6103aa96 4598out_redir:
6103aa96
JF
4599 kfree_skb(skb);
4600 return XDP_DROP;
d4455169 4601}
7c497478 4602EXPORT_SYMBOL_GPL(do_xdp_generic);
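
/* Editor's illustrative sketch: a driver without native XDP support
 * (tun takes a similar approach in-tree) can still honour an attached
 * program by running generic XDP on each skb before handing it to the
 * stack. example_rx is hypothetical; the caller is assumed to hold
 * rcu_read_lock().
 */
static int example_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct bpf_prog *xdp_prog = rcu_dereference(dev->xdp_prog);

	if (xdp_prog && do_xdp_generic(xdp_prog, skb) != XDP_PASS)
		return NET_RX_DROP;	/* skb already freed or redirected */
	return netif_rx(skb);
}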
d4455169 4603
ae78dbfa 4604static int netif_rx_internal(struct sk_buff *skb)
1da177e4 4605{
b0e28f1e 4606 int ret;
1da177e4 4607
588f0330 4608 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 4609
cf66ba58 4610 trace_netif_rx(skb);
d4455169 4611
df334545 4612#ifdef CONFIG_RPS
dc05360f 4613 if (static_branch_unlikely(&rps_needed)) {
fec5e652 4614 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
4615 int cpu;
4616
cece1945 4617 preempt_disable();
b0e28f1e 4618 rcu_read_lock();
fec5e652
TH
4619
4620 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
4621 if (cpu < 0)
4622 cpu = smp_processor_id();
fec5e652
TH
4623
4624 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4625
b0e28f1e 4626 rcu_read_unlock();
cece1945 4627 preempt_enable();
adc9300e
ED
4628 } else
4629#endif
fec5e652
TH
4630 {
4631 unsigned int qtail;
f4563a75 4632
fec5e652
TH
4633 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4634 put_cpu();
4635 }
b0e28f1e 4636 return ret;
1da177e4 4637}
ae78dbfa
BH
4638
4639/**
4640 * netif_rx - post buffer to the network code
4641 * @skb: buffer to post
4642 *
4643 * This function receives a packet from a device driver and queues it for
4644 * the upper (protocol) levels to process. It always succeeds. The buffer
4645 * may be dropped during processing for congestion control or by the
4646 * protocol layers.
4647 *
4648 * return values:
4649 * NET_RX_SUCCESS (no congestion)
4650 * NET_RX_DROP (packet was dropped)
4651 *
4652 */
4653
4654int netif_rx(struct sk_buff *skb)
4655{
b0e3f1bd
GB
4656 int ret;
4657
ae78dbfa
BH
4658 trace_netif_rx_entry(skb);
4659
b0e3f1bd
GB
4660 ret = netif_rx_internal(skb);
4661 trace_netif_rx_exit(ret);
4662
4663 return ret;
ae78dbfa 4664}
d1b19dff 4665EXPORT_SYMBOL(netif_rx);
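
/* Editor's illustrative sketch: the classic (pre-NAPI) receive path a
 * driver would pair with netif_rx(). Only the two net core calls and
 * the skb helpers are real; the surrounding driver is made up.
 */
static void example_isr_rx(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* queue to the per-CPU backlog */
}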
1da177e4
LT
4666
4667int netif_rx_ni(struct sk_buff *skb)
4668{
4669 int err;
4670
ae78dbfa
BH
4671 trace_netif_rx_ni_entry(skb);
4672
1da177e4 4673 preempt_disable();
ae78dbfa 4674 err = netif_rx_internal(skb);
1da177e4
LT
4675 if (local_softirq_pending())
4676 do_softirq();
4677 preempt_enable();
b0e3f1bd 4678 trace_netif_rx_ni_exit(err);
1da177e4
LT
4679
4680 return err;
4681}
1da177e4
LT
4682EXPORT_SYMBOL(netif_rx_ni);
4683
0766f788 4684static __latent_entropy void net_tx_action(struct softirq_action *h)
1da177e4 4685{
903ceff7 4686 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
4687
4688 if (sd->completion_queue) {
4689 struct sk_buff *clist;
4690
4691 local_irq_disable();
4692 clist = sd->completion_queue;
4693 sd->completion_queue = NULL;
4694 local_irq_enable();
4695
4696 while (clist) {
4697 struct sk_buff *skb = clist;
f4563a75 4698
1da177e4
LT
4699 clist = clist->next;
4700
63354797 4701 WARN_ON(refcount_read(&skb->users));
e6247027
ED
4702 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4703 trace_consume_skb(skb);
4704 else
4705 trace_kfree_skb(skb, net_tx_action);
15fad714
JDB
4706
4707 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4708 __kfree_skb(skb);
4709 else
4710 __kfree_skb_defer(skb);
1da177e4 4711 }
15fad714
JDB
4712
4713 __kfree_skb_flush();
1da177e4
LT
4714 }
4715
4716 if (sd->output_queue) {
37437bb2 4717 struct Qdisc *head;
1da177e4
LT
4718
4719 local_irq_disable();
4720 head = sd->output_queue;
4721 sd->output_queue = NULL;
a9cbd588 4722 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
4723 local_irq_enable();
4724
4725 while (head) {
37437bb2 4726 struct Qdisc *q = head;
6b3ba914 4727 spinlock_t *root_lock = NULL;
37437bb2 4728
1da177e4
LT
4729 head = head->next_sched;
4730
6b3ba914
JF
4731 if (!(q->flags & TCQ_F_NOLOCK)) {
4732 root_lock = qdisc_lock(q);
4733 spin_lock(root_lock);
4734 }
3bcb846c
ED
4735 /* We need to make sure head->next_sched is read
4736 * before clearing __QDISC_STATE_SCHED
4737 */
4738 smp_mb__before_atomic();
4739 clear_bit(__QDISC_STATE_SCHED, &q->state);
4740 qdisc_run(q);
6b3ba914
JF
4741 if (root_lock)
4742 spin_unlock(root_lock);
1da177e4
LT
4743 }
4744 }
f53c7239
SK
4745
4746 xfrm_dev_backlog(sd);
1da177e4
LT
4747}
4748
181402a5 4749#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
da678292
MM
4750/* This hook is defined here for ATM LANE */
4751int (*br_fdb_test_addr_hook)(struct net_device *dev,
4752 unsigned char *addr) __read_mostly;
4fb019a0 4753EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 4754#endif
1da177e4 4755
1f211a1b
DB
4756static inline struct sk_buff *
4757sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4758 struct net_device *orig_dev)
f697c3e8 4759{
e7582bab 4760#ifdef CONFIG_NET_CLS_ACT
46209401 4761 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
d2788d34 4762 struct tcf_result cl_res;
24824a09 4763
c9e99fd0
DB
4764 /* If there's at least one ingress present somewhere (so
4765 * we get here via the enabled static key), remaining devices
4766 * that are not configured with an ingress qdisc will bail
d2788d34 4767 * out here.
c9e99fd0 4768 */
46209401 4769 if (!miniq)
4577139b 4770 return skb;
46209401 4771
f697c3e8
HX
4772 if (*pt_prev) {
4773 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4774 *pt_prev = NULL;
1da177e4
LT
4775 }
4776
3365495c 4777 qdisc_skb_cb(skb)->pkt_len = skb->len;
8dc07fdb 4778 skb->tc_at_ingress = 1;
46209401 4779 mini_qdisc_bstats_cpu_update(miniq, skb);
c9e99fd0 4780
46209401 4781 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
d2788d34
DB
4782 case TC_ACT_OK:
4783 case TC_ACT_RECLASSIFY:
4784 skb->tc_index = TC_H_MIN(cl_res.classid);
4785 break;
4786 case TC_ACT_SHOT:
46209401 4787 mini_qdisc_qstats_cpu_drop(miniq);
8a3a4c6e
ED
4788 kfree_skb(skb);
4789 return NULL;
d2788d34
DB
4790 case TC_ACT_STOLEN:
4791 case TC_ACT_QUEUED:
e25ea21f 4792 case TC_ACT_TRAP:
8a3a4c6e 4793 consume_skb(skb);
d2788d34 4794 return NULL;
27b29f63
AS
4795 case TC_ACT_REDIRECT:
4796 /* skb_mac_header check was done by cls/act_bpf, so
4797 * we can safely push the L2 header back before
4798 * redirecting to another netdev
4799 */
4800 __skb_push(skb, skb->mac_len);
4801 skb_do_redirect(skb);
4802 return NULL;
720f22fe 4803 case TC_ACT_CONSUMED:
cd11b164 4804 return NULL;
d2788d34
DB
4805 default:
4806 break;
f697c3e8 4807 }
e7582bab 4808#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
4809 return skb;
4810}
1da177e4 4811
24b27fc4
MB
4812/**
4813 * netdev_is_rx_handler_busy - check if receive handler is registered
4814 * @dev: device to check
4815 *
4816 * Check if a receive handler is already registered for a given device.
4817 * Return true if there is one.
4818 *
4819 * The caller must hold the rtnl_mutex.
4820 */
4821bool netdev_is_rx_handler_busy(struct net_device *dev)
4822{
4823 ASSERT_RTNL();
4824 return dev && rtnl_dereference(dev->rx_handler);
4825}
4826EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4827
ab95bfe0
JP
4828/**
4829 * netdev_rx_handler_register - register receive handler
4830 * @dev: device to register a handler for
4831 * @rx_handler: receive handler to register
93e2c32b 4832 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 4833 *
e227867f 4834 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
4835 * called from __netif_receive_skb. A negative errno code is returned
4836 * on a failure.
4837 *
4838 * The caller must hold the rtnl_mutex.
4839 *
4840 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
4841 */
4842int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
4843 rx_handler_func_t *rx_handler,
4844 void *rx_handler_data)
ab95bfe0 4845{
1b7cd004 4846 if (netdev_is_rx_handler_busy(dev))
ab95bfe0
JP
4847 return -EBUSY;
4848
f5426250
PA
4849 if (dev->priv_flags & IFF_NO_RX_HANDLER)
4850 return -EINVAL;
4851
00cfec37 4852 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 4853 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
4854 rcu_assign_pointer(dev->rx_handler, rx_handler);
4855
4856 return 0;
4857}
4858EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
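
/* Editor's illustrative sketch: how a stacked device (bridge/macvlan
 * style) hooks a lower device's RX path. example_port and
 * example_deliver are made-up stand-ins for the upper driver's private
 * state and delivery routine; the register call and the RX_HANDLER_*
 * contract are real.
 */
struct example_port;				/* hypothetical per-port state */
void example_deliver(struct example_port *port, struct sk_buff *skb);

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	example_deliver(port, skb);	/* we now own the skb */
	return RX_HANDLER_CONSUMED;
}

static int example_attach(struct net_device *lower, struct example_port *port)
{
	ASSERT_RTNL();			/* registration requires rtnl_mutex */
	return netdev_rx_handler_register(lower, example_handle_frame, port);
}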
4859
4860/**
4861 * netdev_rx_handler_unregister - unregister receive handler
4862 * @dev: device to unregister a handler from
4863 *
166ec369 4864 * Unregister a receive handler from a device.
ab95bfe0
JP
4865 *
4866 * The caller must hold the rtnl_mutex.
4867 */
4868void netdev_rx_handler_unregister(struct net_device *dev)
4869{
4870
4871 ASSERT_RTNL();
a9b3cd7f 4872 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
4873 /* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
4874 * section is guaranteed to see a non-NULL rx_handler_data
4875 * as well.
4876 */
4877 synchronize_net();
a9b3cd7f 4878 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
4879}
4880EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4881
b4b9e355
MG
4882/*
4883 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4884 * the special handling of PFMEMALLOC skbs.
4885 */
4886static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4887{
4888 switch (skb->protocol) {
2b8837ae
JP
4889 case htons(ETH_P_ARP):
4890 case htons(ETH_P_IP):
4891 case htons(ETH_P_IPV6):
4892 case htons(ETH_P_8021Q):
4893 case htons(ETH_P_8021AD):
b4b9e355
MG
4894 return true;
4895 default:
4896 return false;
4897 }
4898}
4899
e687ad60
PN
4900static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4901 int *ret, struct net_device *orig_dev)
4902{
e7582bab 4903#ifdef CONFIG_NETFILTER_INGRESS
e687ad60 4904 if (nf_hook_ingress_active(skb)) {
2c1e2703
AC
4905 int ingress_retval;
4906
e687ad60
PN
4907 if (*pt_prev) {
4908 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4909 *pt_prev = NULL;
4910 }
4911
2c1e2703
AC
4912 rcu_read_lock();
4913 ingress_retval = nf_hook_ingress(skb);
4914 rcu_read_unlock();
4915 return ingress_retval;
e687ad60 4916 }
e7582bab 4917#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
4918 return 0;
4919}
e687ad60 4920
88eb1944
EC
4921static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4922 struct packet_type **ppt_prev)
1da177e4
LT
4923{
4924 struct packet_type *ptype, *pt_prev;
ab95bfe0 4925 rx_handler_func_t *rx_handler;
f2ccd8fa 4926 struct net_device *orig_dev;
8a4eb573 4927 bool deliver_exact = false;
1da177e4 4928 int ret = NET_RX_DROP;
252e3346 4929 __be16 type;
1da177e4 4930
588f0330 4931 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 4932
cf66ba58 4933 trace_netif_receive_skb(skb);
9b22ea56 4934
cc9bd5ce 4935 orig_dev = skb->dev;
8f903c70 4936
c1d2bbe1 4937 skb_reset_network_header(skb);
fda55eca
ED
4938 if (!skb_transport_header_was_set(skb))
4939 skb_reset_transport_header(skb);
0b5c9db1 4940 skb_reset_mac_len(skb);
1da177e4
LT
4941
4942 pt_prev = NULL;
4943
63d8ea7f 4944another_round:
b6858177 4945 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
4946
4947 __this_cpu_inc(softnet_data.processed);
4948
458bf2f2
SH
4949 if (static_branch_unlikely(&generic_xdp_needed_key)) {
4950 int ret2;
4951
4952 preempt_disable();
4953 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4954 preempt_enable();
4955
4956 if (ret2 != XDP_PASS)
4957 return NET_RX_DROP;
4958 skb_reset_mac_len(skb);
4959 }
4960
8ad227ff
PM
4961 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4962 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 4963 skb = skb_vlan_untag(skb);
bcc6d479 4964 if (unlikely(!skb))
2c17d27c 4965 goto out;
bcc6d479
JP
4966 }
4967
e7246e12
WB
4968 if (skb_skip_tc_classify(skb))
4969 goto skip_classify;
1da177e4 4970
9754e293 4971 if (pfmemalloc)
b4b9e355
MG
4972 goto skip_taps;
4973
1da177e4 4974 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
4975 if (pt_prev)
4976 ret = deliver_skb(skb, pt_prev, orig_dev);
4977 pt_prev = ptype;
4978 }
4979
4980 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4981 if (pt_prev)
4982 ret = deliver_skb(skb, pt_prev, orig_dev);
4983 pt_prev = ptype;
1da177e4
LT
4984 }
4985
b4b9e355 4986skip_taps:
1cf51900 4987#ifdef CONFIG_NET_INGRESS
aabf6772 4988 if (static_branch_unlikely(&ingress_needed_key)) {
1f211a1b 4989 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4577139b 4990 if (!skb)
2c17d27c 4991 goto out;
e687ad60
PN
4992
4993 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 4994 goto out;
4577139b 4995 }
1cf51900 4996#endif
a5135bcf 4997 skb_reset_tc(skb);
e7246e12 4998skip_classify:
9754e293 4999 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
5000 goto drop;
5001
df8a39de 5002 if (skb_vlan_tag_present(skb)) {
2425717b
JF
5003 if (pt_prev) {
5004 ret = deliver_skb(skb, pt_prev, orig_dev);
5005 pt_prev = NULL;
5006 }
48cc32d3 5007 if (vlan_do_receive(&skb))
2425717b
JF
5008 goto another_round;
5009 else if (unlikely(!skb))
2c17d27c 5010 goto out;
2425717b
JF
5011 }
5012
48cc32d3 5013 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
5014 if (rx_handler) {
5015 if (pt_prev) {
5016 ret = deliver_skb(skb, pt_prev, orig_dev);
5017 pt_prev = NULL;
5018 }
8a4eb573
JP
5019 switch (rx_handler(&skb)) {
5020 case RX_HANDLER_CONSUMED:
3bc1b1ad 5021 ret = NET_RX_SUCCESS;
2c17d27c 5022 goto out;
8a4eb573 5023 case RX_HANDLER_ANOTHER:
63d8ea7f 5024 goto another_round;
8a4eb573
JP
5025 case RX_HANDLER_EXACT:
5026 deliver_exact = true;
5027 case RX_HANDLER_PASS:
5028 break;
5029 default:
5030 BUG();
5031 }
ab95bfe0 5032 }
1da177e4 5033
df8a39de 5034 if (unlikely(skb_vlan_tag_present(skb))) {
36b2f61a
GV
5035check_vlan_id:
5036 if (skb_vlan_tag_get_id(skb)) {
5037 /* Vlan id is non-zero and vlan_do_receive() above couldn't
5038 * find vlan device.
5039 */
d4b812de 5040 skb->pkt_type = PACKET_OTHERHOST;
36b2f61a
GV
5041 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5042 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5043 /* Outer header is 802.1P with vlan 0, inner header is
5044 * 802.1Q or 802.1AD and vlan_do_receive() above could
5045 * not find vlan dev for vlan id 0.
5046 */
5047 __vlan_hwaccel_clear_tag(skb);
5048 skb = skb_vlan_untag(skb);
5049 if (unlikely(!skb))
5050 goto out;
5051 if (vlan_do_receive(&skb))
5052 /* After stripping off 802.1P header with vlan 0
5053 * vlan dev is found for inner header.
5054 */
5055 goto another_round;
5056 else if (unlikely(!skb))
5057 goto out;
5058 else
5059 /* We have stripped outer 802.1P vlan 0 header.
5060 * But could not find vlan dev.
5061 * check again for vlan id to set OTHERHOST.
5062 */
5063 goto check_vlan_id;
5064 }
d4b812de
ED
5065 /* Note: we might in the future use prio bits
5066 * and set skb->priority like in vlan_do_receive()
5067 * For the time being, just ignore Priority Code Point
5068 */
b1817524 5069 __vlan_hwaccel_clear_tag(skb);
d4b812de 5070 }
48cc32d3 5071
7866a621
SN
5072 type = skb->protocol;
5073
63d8ea7f 5074 /* deliver only exact match when indicated */
7866a621
SN
5075 if (likely(!deliver_exact)) {
5076 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5077 &ptype_base[ntohs(type) &
5078 PTYPE_HASH_MASK]);
5079 }
1f3c8804 5080
7866a621
SN
5081 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5082 &orig_dev->ptype_specific);
5083
5084 if (unlikely(skb->dev != orig_dev)) {
5085 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5086 &skb->dev->ptype_specific);
1da177e4
LT
5087 }
5088
5089 if (pt_prev) {
1f8b977a 5090 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
0e698bf6 5091 goto drop;
88eb1944 5092 *ppt_prev = pt_prev;
1da177e4 5093 } else {
b4b9e355 5094drop:
6e7333d3
JW
5095 if (!deliver_exact)
5096 atomic_long_inc(&skb->dev->rx_dropped);
5097 else
5098 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
5099 kfree_skb(skb);
5100 /* Jamal, now you will not be able to escape explaining
5101 * to me how you were going to use this. :-)
5102 */
5103 ret = NET_RX_DROP;
5104 }
5105
2c17d27c 5106out:
9754e293
DM
5107 return ret;
5108}
5109
88eb1944
EC
5110static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5111{
5112 struct net_device *orig_dev = skb->dev;
5113 struct packet_type *pt_prev = NULL;
5114 int ret;
5115
5116 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5117 if (pt_prev)
f5737cba
PA
5118 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5119 skb->dev, pt_prev, orig_dev);
88eb1944
EC
5120 return ret;
5121}
5122
1c601d82
JDB
5123/**
5124 * netif_receive_skb_core - special purpose version of netif_receive_skb
5125 * @skb: buffer to process
5126 *
5127 * More direct receive version of netif_receive_skb(). It should
5128 * only be used by callers that have a need to skip RPS and Generic XDP.
5129 * Caller must also take care of handling if (page_is_)pfmemalloc.
5130 *
5131 * This function may only be called from softirq context and interrupts
5132 * should be enabled.
5133 *
5134 * Return values (usually ignored):
5135 * NET_RX_SUCCESS: no congestion
5136 * NET_RX_DROP: packet was dropped
5137 */
5138int netif_receive_skb_core(struct sk_buff *skb)
5139{
5140 int ret;
5141
5142 rcu_read_lock();
88eb1944 5143 ret = __netif_receive_skb_one_core(skb, false);
1c601d82
JDB
5144 rcu_read_unlock();
5145
5146 return ret;
5147}
5148EXPORT_SYMBOL(netif_receive_skb_core);
5149
88eb1944
EC
5150static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5151 struct packet_type *pt_prev,
5152 struct net_device *orig_dev)
4ce0017a
EC
5153{
5154 struct sk_buff *skb, *next;
5155
88eb1944
EC
5156 if (!pt_prev)
5157 return;
5158 if (list_empty(head))
5159 return;
17266ee9 5160 if (pt_prev->list_func != NULL)
fdf71426
PA
5161 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5162 ip_list_rcv, head, pt_prev, orig_dev);
17266ee9 5163 else
9a5a90d1
AL
5164 list_for_each_entry_safe(skb, next, head, list) {
5165 skb_list_del_init(skb);
fdf71426 5166 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
9a5a90d1 5167 }
88eb1944
EC
5168}

static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}
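
/* The PF_MEMALLOC trick above generalizes; a minimal sketch of the
 * save/restore pattern for any section that must not recurse into
 * reclaim (the called helper below is a placeholder, not a real function):
 *
 *	unsigned int flag = memalloc_noreclaim_save();
 *	do_work_that_must_not_enter_reclaim();
 *	memalloc_noreclaim_restore(flag);
 */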

static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}

static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
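
/* Illustrative sketch of what the static key above buys on the RX path;
 * the real check lives earlier in this file and the exact call site may
 * differ, but the shape is:
 *
 *	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 *		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
 *		if (ret != XDP_PASS)
 *			return NET_RX_DROP;
 *	}
 *
 * With no generic XDP program installed anywhere, the branch costs a
 * single patched-out jump.
 */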

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

static void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(netdev_tstamp_prequeue, skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
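
/* Typical (illustrative) usage from a hypothetical driver's receive
 * handling; "priv" and the descriptor helper are placeholders:
 *
 *	skb = my_build_skb_from_descriptor(priv);
 *	skb->protocol = eth_type_trans(skb, priv->netdev);
 *	netif_receive_skb(skb);
 *
 * NAPI drivers usually prefer napi_gro_receive() (later in this file)
 * so that GRO can coalesce the flow.
 */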

/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since the return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
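
/* Illustrative batching sketch: a hypothetical driver can collect several
 * skbs per poll and hand them over in one call; the ring-fetch helper is
 * a placeholder:
 *
 *	LIST_HEAD(rx_list);
 *
 *	while ((skb = my_fetch_next_rx_skb(priv)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		list_add_tail(&skb->list, &rx_list);
 *	}
 *	netif_receive_skb_list(&rx_list);
 */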

DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}

INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age;
 * the youngest packets are at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static struct list_head *gro_list_prepare(struct napi_struct *napi,
					  struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct list_head *head;
	struct sk_buff *p;

	head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}

	return head;
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so a NULL
	 * 'oldest' entry is impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, the caller is adding a
	 * new SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(oldest);
}

INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *gro_head;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_head = gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					gro_head, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(pp);
		napi->gro_hash[hash].count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
		gro_flush_oldest(gro_head);
	} else {
		napi->gro_hash[hash].count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, gro_head);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (napi->gro_hash[hash].count) {
		if (!test_bit(hash, &napi->gro_bitmask))
			__set_bit(hash, &napi->gro_bitmask);
	} else if (test_bit(hash, &napi->gro_bitmask)) {
		__clear_bit(hash, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	skb_ext_put(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
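
/* Illustrative usage from a hypothetical NAPI poll function; GRO then
 * decides whether to merge, hold or pass the skb up the stack:
 *
 *	skb->protocol = eth_type_trans(skb, priv->netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * "priv->napi" is assumed to have been registered with netif_napi_add()
 * (see below).
 */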

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	skb_ext_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
{
	list_add_tail(&skb->list, &napi->rx_list);
	if (++napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb);
		break;

	case GRO_DROP:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
 * so we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
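
/* Illustrative pairing with napi_get_frags() for a hypothetical driver
 * that receives into pages rather than a linear buffer:
 *
 *	skb = napi_get_frags(&priv->napi);
 *	if (!skb)
 *		return;
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(&priv->napi);
 *
 * After the call the core owns the skb (it may be merged, recycled via
 * napi_reuse_skb() or freed); the driver must not touch it again.
 */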

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending IPIs; it's better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as the compiler
		 * emits better code than:
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
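
/* The prep and schedule calls are usually paired; an illustrative hard-irq
 * handler of a hypothetical driver would do:
 *
 *	my_disable_device_irqs(priv);
 *	if (napi_schedule_prep(&priv->napi))
 *		__napi_schedule_irqoff(&priv->napi);
 *
 * (The napi_schedule()/napi_schedule_irqoff() helpers wrap exactly this
 * pattern.)
 */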

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	gro_normal_list(n);

	if (n->gro_bitmask) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to somehow bound the time packets are kept in
		 * the GRO layer.
		 */
		napi_gro_flush(n, !!timeout);
		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
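
/* Illustrative end of a hypothetical driver poll routine, showing the
 * intended contract with napi_complete_done():
 *
 *	if (work_done < budget &&
 *	    napi_complete_done(&priv->napi, work_done))
 *		my_enable_device_irqs(priv);
 *	return work_done;
 *
 * Re-enabling device interrupts only when napi_complete_done() returns
 * true respects the NAPIF_STATE_MISSED rescheduling logic above.
 */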

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET) {
		/* As the whole budget was spent, we still own the napi so can
		 * safely handle the rx_list.
		 */
		gro_normal_list(napi);
		__napi_schedule(napi);
	}
	local_bh_enable();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for making sure an RCU grace period
 * is respected before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_bitmask && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}

static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
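
/* Illustrative registration from a hypothetical driver's probe path
 * ("my_poll" is a placeholder poll callback):
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *
 * followed, typically from ndo_open, by:
 *
 *	napi_enable(&priv->napi);
 *
 * my_poll() must honour its budget and follow the napi_complete_done()
 * contract shown earlier.
 */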

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;
}
EXPORT_SYMBOL(netif_napi_del);
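
/* Illustrative teardown order for a hypothetical driver:
 *
 *	napi_disable(&priv->napi);	(from ndo_stop, before freeing rings)
 *	netif_napi_del(&priv->napi);	(from remove, in process context)
 *
 * netif_napi_del() takes care of the RCU grace period required by
 * napi_hash_del() before @napi memory may be reused.
 */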

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	gro_normal_list(n);

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
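
/* Illustrative walker callback matching the fn() signature above; this
 * hypothetical helper counts upper devices into a caller-provided int:
 *
 *	static int count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	(a non-zero return would stop the walk)
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &n);
 *	rcu_read_unlock();
 */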

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6986
9ff162a8
JP
6987/**
6988 * netdev_master_upper_dev_get_rcu - Get master upper device
6989 * @dev: device
6990 *
6991 * Find a master upper device and return pointer to it or NULL in case
6992 * it's not there. The caller must hold the RCU read lock.
6993 */
6994struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6995{
aa9d8560 6996 struct netdev_adjacent *upper;
9ff162a8 6997
2f268f12 6998 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 6999 struct netdev_adjacent, list);
9ff162a8
JP
7000 if (upper && likely(upper->master))
7001 return upper->dev;
7002 return NULL;
7003}
7004EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7005
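/*
 * Illustrative sketch (not part of dev.c): reading the master device from a
 * context that cannot take RTNL. The helper name is hypothetical; the point
 * is that the returned pointer is only valid inside the RCU read-side
 * critical section, so anything derived from it must be copied out before
 * rcu_read_unlock().
 */
static int example_get_master_ifindex(struct net_device *dev)
{
	struct net_device *master;
	int ifindex = 0;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		ifindex = master->ifindex;	/* copy while still protected */
	rcu_read_unlock();

	return ifindex;
}
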
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
			.extack = extack,
		},
		.upper_dev = upper_dev,
		.master = master,
		.linking = true,
		.upper_info = upper_info,
	};
	struct net_device *master_dev;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (!master) {
		if (netdev_has_upper_dev(dev, upper_dev))
			return -EEXIST;
	} else {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			return master_dev == upper_dev ? -EEXIST : -EBUSY;
	}

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
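
/*
 * Illustrative sketch (not part of dev.c): how a bonding-style driver might
 * attach a slave. The names example_master/example_slave are hypothetical;
 * the pattern is the documented one: hold RTNL, link, and undo with
 * netdev_upper_dev_unlink() if any later setup step fails.
 */
static int example_enslave(struct net_device *example_master,
			   struct net_device *example_slave,
			   struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(example_slave, example_master,
					   NULL, NULL, extack);
	if (err)
		return err;	/* -EBUSY, -EEXIST or a notifier veto */

	/* ... driver-specific setup; on failure roll back with:
	 * netdev_upper_dev_unlink(example_slave, example_master);
	 */
	return 0;
}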

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);

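/*
 * Illustrative sketch (not part of dev.c): a packet-capture style user of
 * dev_set_promiscuity(). The helper names are hypothetical. The calls must
 * balance: every +1 needs a matching -1, because the device only leaves
 * promiscuous mode once the count drops back to zero.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* may fail with -EOVERFLOW */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
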
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev, extack);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.info = {
				.dev = dev,
			},
			.flags_changed = changes,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags, extack);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);

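/*
 * Illustrative sketch (not part of dev.c): bringing an interface up the way
 * an ioctl/netlink handler would, by setting IFF_UP in the userspace-format
 * flags. The helper name is hypothetical; dev_change_flags() needs RTNL and
 * takes care of notifiers and rtnetlink messages itself.
 */
static int example_bring_up(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	unsigned int flags;
	int err;

	rtnl_lock();
	flags = dev_get_flags(dev);		/* userspace-visible flags */
	err = dev_change_flags(dev, flags | IFF_UP, extack);
	rtnl_unlock();
	return err;
}
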
int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

/**
 * dev_set_mtu_ext - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);

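/*
 * Illustrative sketch (not part of dev.c): requesting jumbo frames.
 * dev_set_mtu() enforces dev->min_mtu/dev->max_mtu and rolls the value back
 * if a NETDEV_CHANGEMTU notifier vetoes it, so a caller only needs RTNL and
 * a check of the errno. The helper name and the 9000-byte value are
 * hypothetical.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* -EINVAL if outside min/max range */
	rtnl_unlock();
	return err;
}
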
/**
 * dev_change_tx_queue_len - Change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 * @dev: device
 * @addr: new address
 * @extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);

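/*
 * Illustrative sketch (not part of dev.c): programming a locally
 * administered MAC address. The helper name and address bytes are made up;
 * dev_set_mac_address() checks sa_family against dev->type and runs the
 * NETDEV_PRE_CHANGEADDR veto chain before touching the hardware.
 */
static int example_set_mac(struct net_device *dev)
{
	static const u8 addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct sockaddr sa = { .sa_family = dev->type };
	int err;

	memcpy(sa.sa_data, addr, ETH_ALEN);
	rtnl_lock();
	err = dev_set_mac_address(dev, &sa, NULL);	/* NULL extack is ok */
	rtnl_unlock();
	return err;
}
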
/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!err || err != -EOPNOTSUPP)
		return err;

	if (!recurse)
		return -EOPNOTSUPP;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);

/**
 * netdev_port_same_parent_id - Indicate if two network devices have
 * the same port parent identifier
 * @a: first network device
 * @b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);

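/*
 * Illustrative sketch (not part of dev.c): deciding whether two netdevs sit
 * behind the same switch ASIC, e.g. before offloading a bond. The helper is
 * hypothetical; netdev_port_same_parent_id() already recurses through the
 * lower-device lists (which is why RTNL is assumed held here) and compares
 * the two parent IDs.
 */
static bool example_can_offload_pair(struct net_device *a,
				     struct net_device *b)
{
	ASSERT_RTNL();		/* the recursion walks lower-device lists */
	return netdev_port_same_parent_id(a, b);
}
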
/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_change_proto_down_generic - generic implementation for
 *	ndo_change_proto_down that sets carrier according to
 *	proto_down.
 *
 * @dev: device
 * @proto_down: new value
 */
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
{
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}
EXPORT_SYMBOL(dev_change_proto_down_generic);

u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
		    enum bpf_netdev_command cmd)
{
	struct netdev_bpf xdp;

	if (!bpf_op)
		return 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = cmd;

	/* Query must always succeed. */
	WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);

	return xdp.prog_id;
}

static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return bpf_op(dev, &xdp);
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;

	/* Remove generic XDP */
	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));

	/* Remove from the driver */
	ndo_bpf = dev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;
	WARN_ON(ndo_bpf(dev, &xdp));
	if (xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));

	/* Remove HW offload */
	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG_HW;
	if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
		WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
					NULL));
}

/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	enum bpf_netdev_command query;
	struct bpf_prog *prog = NULL;
	bpf_op_t bpf_op, bpf_chk;
	bool offload;
	int err;

	ASSERT_RTNL();

	offload = flags & XDP_FLAGS_HW_MODE;
	query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;

	bpf_op = bpf_chk = ops->ndo_bpf;
	if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
		NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
		return -EOPNOTSUPP;
	}
	if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
		bpf_op = generic_xdp_install;
	if (bpf_op == bpf_chk)
		bpf_chk = generic_xdp_install;

	if (fd >= 0) {
		u32 prog_id;

		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
			return -EEXIST;
		}

		prog_id = __dev_xdp_query(dev, bpf_op, query);
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}

		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
					     bpf_op == ops->ndo_bpf);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
			NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
			bpf_prog_put(prog);
			return -EINVAL;
		}

		if (prog->aux->id == prog_id) {
			bpf_prog_put(prog);
			return 0;
		}
	} else {
		if (!__dev_xdp_query(dev, bpf_op, query))
			return 0;
	}

	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}

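/*
 * Illustrative sketch (not part of dev.c): attaching an already-loaded XDP
 * program by fd, the way the rtnetlink IFLA_XDP handler drives this
 * function. The helper name and fallback policy are hypothetical: request
 * the native driver path first, then retry in generic (skb-based) mode if
 * the driver has no ndo_bpf.
 */
static int example_attach_xdp(struct net_device *dev, int prog_fd)
{
	int err;

	rtnl_lock();
	err = dev_change_xdp_fd(dev, NULL, prog_fd, XDP_FLAGS_DRV_MODE);
	if (err == -EOPNOTSUPP)	/* no native support: use generic XDP */
		err = dev_change_xdp_fd(dev, NULL, prog_fd,
					XDP_FLAGS_SKB_MODE);
	rtnl_unlock();
	return err;
}
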
/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware. If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		   &dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			   "set_features() failed (%d); wanted %pNF, left %pNF\n",
			   err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

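/*
 * Illustrative sketch (not part of dev.c): a driver reacting to a runtime
 * capability change, say firmware disabling checksum offload. The helper
 * name and scenario are hypothetical; clearing the bit from hw_features and
 * re-running netdev_update_features() lets the core recompute dev->features
 * and notify userspace only if something actually changed.
 */
static void example_fw_lost_csum(struct net_device *dev)
{
	rtnl_lock();
	dev->hw_features &= ~NETIF_F_HW_CSUM;
	netdev_update_features(dev);	/* recompute + NETDEV_FEAT_CHANGE */
	rtnl_unlock();
}
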
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

1b4bf461
ED
8680static int netif_alloc_rx_queues(struct net_device *dev)
8681{
1b4bf461 8682 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 8683 struct netdev_rx_queue *rx;
10595902 8684 size_t sz = count * sizeof(*rx);
e817f856 8685 int err = 0;
1b4bf461 8686
bd25fa7b 8687 BUG_ON(count < 1);
1b4bf461 8688
dcda9b04 8689 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8690 if (!rx)
8691 return -ENOMEM;
8692
8693 dev->_rx = rx;
8694
e817f856 8695 for (i = 0; i < count; i++) {
fe822240 8696 rx[i].dev = dev;
8697
8698 /* XDP RX-queue setup */
8699 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8700 if (err < 0)
8701 goto err_rxq_info;
8702 }
1b4bf461 8703 return 0;
8704
8705err_rxq_info:
8706	/* Roll back successful registrations and free other resources */
8707 while (i--)
8708 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
141b52a9 8709 kvfree(dev->_rx);
8710 dev->_rx = NULL;
8711 return err;
8712}
8713
8714static void netif_free_rx_queues(struct net_device *dev)
8715{
8716 unsigned int i, count = dev->num_rx_queues;
8717
8718 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
8719 if (!dev->_rx)
8720 return;
8721
e817f856 8722 for (i = 0; i < count; i++)
8723 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8724
8725 kvfree(dev->_rx);
8726}
8727
8728static void netdev_init_one_queue(struct net_device *dev,
8729 struct netdev_queue *queue, void *_unused)
8730{
8731 /* Initialize queue lock */
8732 spin_lock_init(&queue->_xmit_lock);
8733 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8734 queue->xmit_lock_owner = -1;
b236da69 8735 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 8736 queue->dev = dev;
8737#ifdef CONFIG_BQL
8738 dql_init(&queue->dql, HZ);
8739#endif
8740}
8741
8742static void netif_free_tx_queues(struct net_device *dev)
8743{
4cb28970 8744 kvfree(dev->_tx);
8745}
8746
8747static int netif_alloc_netdev_queues(struct net_device *dev)
8748{
8749 unsigned int count = dev->num_tx_queues;
8750 struct netdev_queue *tx;
60877a32 8751 size_t sz = count * sizeof(*tx);
e6484930 8752
8753 if (count < 1 || count > 0xffff)
8754 return -EINVAL;
62b5942a 8755
dcda9b04 8756 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8757 if (!tx)
8758 return -ENOMEM;
8759
e6484930 8760 dev->_tx = tx;
1d24eb48 8761
8762 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8763 spin_lock_init(&dev->tx_global_lock);
8764
8765 return 0;
8766}
8767
8768void netif_tx_stop_all_queues(struct net_device *dev)
8769{
8770 unsigned int i;
8771
8772 for (i = 0; i < dev->num_tx_queues; i++) {
8773 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
f4563a75 8774
8775 netif_tx_stop_queue(txq);
8776 }
8777}
8778EXPORT_SYMBOL(netif_tx_stop_all_queues);
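/* Example (illustrative sketch, not part of this file): a driver stops
 * all TX queues before tearing down its rings, e.g. in its ndo_stop()
 * implementation, so the stack stops handing it packets; mydrv_stop()
 * and mydrv_free_rings() are hypothetical:
 *
 *	static int mydrv_stop(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);
 *		mydrv_free_rings(dev);
 *		return 0;
 *	}
 */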
8779
8780/**
8781 * register_netdevice - register a network device
8782 * @dev: device to register
8783 *
8784 * Take a completed network device structure and add it to the kernel
8785 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8786 * chain. 0 is returned on success. A negative errno code is returned
8787 * on a failure to set up the device, or if the name is a duplicate.
8788 *
8789 * Callers must hold the rtnl semaphore. You may want
8790 * register_netdev() instead of this.
8791 *
8792 * BUGS:
8793 * The locking appears insufficient to guarantee two parallel registers
8794 * will not get the same name.
8795 */
8796
8797int register_netdevice(struct net_device *dev)
8798{
1da177e4 8799 int ret;
d314774c 8800 struct net *net = dev_net(dev);
1da177e4 8801
8802 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8803 NETDEV_FEATURE_COUNT);
8804 BUG_ON(dev_boot_phase);
8805 ASSERT_RTNL();
8806
8807 might_sleep();
8808
8809	/* When net_device structures are persistent, this will be fatal. */
8810 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 8811 BUG_ON(!net);
1da177e4 8812
f1f28aa3 8813 spin_lock_init(&dev->addr_list_lock);
cf508b12 8814 netdev_set_addr_lockdep_class(dev);
1da177e4 8815
828de4f6 8816 ret = dev_get_valid_name(net, dev, dev->name);
8817 if (ret < 0)
8818 goto out;
8819
8820 dev->name_node = netdev_name_node_head_alloc(dev);
8821 if (!dev->name_node)
8822 goto out;
8823
1da177e4 8824 /* Init, if this function is available */
8825 if (dev->netdev_ops->ndo_init) {
8826 ret = dev->netdev_ops->ndo_init(dev);
8827 if (ret) {
8828 if (ret > 0)
8829 ret = -EIO;
90833aa4 8830 goto out;
8831 }
8832 }
4ec93edb 8833
8834 if (((dev->hw_features | dev->features) &
8835 NETIF_F_HW_VLAN_CTAG_FILTER) &&
8836 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8837 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8838 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8839 ret = -EINVAL;
8840 goto err_uninit;
8841 }
8842
8843 ret = -EBUSY;
8844 if (!dev->ifindex)
8845 dev->ifindex = dev_new_index(net);
8846 else if (__dev_get_by_index(net, dev->ifindex))
8847 goto err_uninit;
8848
8849 /* Transfer changeable features to wanted_features and enable
8850 * software offloads (GSO and GRO).
8851 */
8852 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f 8853 dev->features |= NETIF_F_SOFT_FEATURES;
8854
8855 if (dev->netdev_ops->ndo_udp_tunnel_add) {
8856 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8857 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8858 }
8859
14d1232f 8860 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 8861
cbc53e08 8862 if (!(dev->flags & IFF_LOOPBACK))
34324dc2 8863 dev->hw_features |= NETIF_F_NOCACHE_COPY;
cbc53e08 8864
8865 /* If IPv4 TCP segmentation offload is supported we should also
8866 * allow the device to enable segmenting the frame with the option
8867 * of ignoring a static IP ID value. This doesn't enable the
8868 * feature itself but allows the user to enable it later.
8869 */
8870 if (dev->hw_features & NETIF_F_TSO)
8871 dev->hw_features |= NETIF_F_TSO_MANGLEID;
8872 if (dev->vlan_features & NETIF_F_TSO)
8873 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8874 if (dev->mpls_features & NETIF_F_TSO)
8875 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8876 if (dev->hw_enc_features & NETIF_F_TSO)
8877 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
c6e1a0d1 8878
1180e7d6 8879 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 8880 */
1180e7d6 8881 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 8882
8883 /* Make NETIF_F_SG inheritable to tunnel devices.
8884 */
802ab55a 8885 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
ee579677 8886
8887 /* Make NETIF_F_SG inheritable to MPLS.
8888 */
8889 dev->mpls_features |= NETIF_F_SG;
8890
8891 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8892 ret = notifier_to_errno(ret);
8893 if (ret)
8894 goto err_uninit;
8895
8b41d188 8896 ret = netdev_register_kobject(dev);
b17a7c17 8897 if (ret)
7ce1b0ed 8898 goto err_uninit;
8899 dev->reg_state = NETREG_REGISTERED;
8900
6cb6a27c 8901 __netdev_update_features(dev);
8e9b59b2 8902
8903 /*
8904 * Default initial state at registry is that the
8905 * device is present.
8906 */
8907
8908 set_bit(__LINK_STATE_PRESENT, &dev->state);
8909
8910 linkwatch_init_dev(dev);
8911
1da177e4 8912 dev_init_scheduler(dev);
1da177e4 8913 dev_hold(dev);
ce286d32 8914 list_netdevice(dev);
7bf23575 8915 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 8916
8917 /* If the device has permanent device address, driver should
8918 * set dev_addr and also addr_assign_type should be set to
8919 * NET_ADDR_PERM (default value).
8920 */
8921 if (dev->addr_assign_type == NET_ADDR_PERM)
8922 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8923
8924	/* Notify protocols that a new device appeared. */
056925ab 8925 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 8926 ret = notifier_to_errno(ret);
8927 if (ret) {
8928 rollback_registered(dev);
8929 rcu_barrier();
8930
8931 dev->reg_state = NETREG_UNREGISTERED;
8932 }
8933 /*
8934 * Prevent userspace races by waiting until the network
8935 * device is fully setup before sending notifications.
8936 */
8937 if (!dev->rtnl_link_ops ||
8938 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 8939 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8940
8941out:
8942 return ret;
8943
8944err_uninit:
8945 if (dev->name_node)
8946 netdev_name_node_free(dev->name_node);
8947 if (dev->netdev_ops->ndo_uninit)
8948 dev->netdev_ops->ndo_uninit(dev);
8949 if (dev->priv_destructor)
8950 dev->priv_destructor(dev);
7ce1b0ed 8951 goto out;
1da177e4 8952}
d1b19dff 8953EXPORT_SYMBOL(register_netdevice);
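/* Example (illustrative sketch, not part of this file): code that already
 * runs under the rtnl semaphore, such as an rtnl_link ->newlink() handler,
 * calls register_netdevice() directly rather than register_netdev():
 *
 *	ASSERT_RTNL();
 *	err = register_netdevice(dev);
 *	if (err)
 *		return err;
 */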
1da177e4 8954
8955/**
8956 * init_dummy_netdev - init a dummy network device for NAPI
8957 * @dev: device to init
8958 *
8959 * This takes a network device structure and initializes the minimum
8960 * number of fields so it can be used to schedule NAPI polls without
8961 * registering a full-blown interface. This is to be used by drivers
8962 * that need to tie several hardware interfaces to a single NAPI
8963 * poll scheduler due to HW limitations.
8964 */
8965int init_dummy_netdev(struct net_device *dev)
8966{
8967	/* Clear everything. Note we don't initialize spinlocks
8968	 * as they aren't supposed to be taken by any of the
8969	 * NAPI code and this dummy netdev is supposed to be
8970	 * used only for NAPI polls.
8971	 */
8972 memset(dev, 0, sizeof(struct net_device));
8973
8974 /* make sure we BUG if trying to hit standard
8975 * register/unregister code path
8976 */
8977 dev->reg_state = NETREG_DUMMY;
8978
8979 /* NAPI wants this */
8980 INIT_LIST_HEAD(&dev->napi_list);
8981
8982 /* a dummy interface is started by default */
8983 set_bit(__LINK_STATE_PRESENT, &dev->state);
8984 set_bit(__LINK_STATE_START, &dev->state);
8985
8986 /* napi_busy_loop stats accounting wants this */
8987 dev_net_set(dev, &init_net);
8988
8989	/* Note: We don't allocate pcpu_refcnt for dummy devices,
8990	 * because users of this 'device' don't need to change
8991	 * its refcount.
8992 */
8993
8994 return 0;
8995}
8996EXPORT_SYMBOL_GPL(init_dummy_netdev);
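/* Example (illustrative sketch, not part of this file): a driver with one
 * piece of hardware but several NAPI contexts can hang them off a dummy
 * netdev; the adapter struct layout and mydrv_poll() are hypothetical:
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi,
 *		       mydrv_poll, NAPI_POLL_WEIGHT);
 */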
8997
8998
8999/**
9000 * register_netdev - register a network device
9001 * @dev: device to register
9002 *
9003 * Take a completed network device structure and add it to the kernel
9004 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9005 * chain. 0 is returned on success. A negative errno code is returned
9006 * on a failure to set up the device, or if the name is a duplicate.
9007 *
38b4da38 9008 * This is a wrapper around register_netdevice that takes the rtnl semaphore
9009 * and expands the device name if you passed a format string to
9010 * alloc_netdev.
9011 */
9012int register_netdev(struct net_device *dev)
9013{
9014 int err;
9015
9016 if (rtnl_lock_killable())
9017 return -EINTR;
1da177e4 9018 err = register_netdevice(dev);
9019 rtnl_unlock();
9020 return err;
9021}
9022EXPORT_SYMBOL(register_netdev);
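/* Example (illustrative sketch, not part of this file): a typical probe
 * path pairs alloc_etherdev() with register_netdev() and unwinds with
 * free_netdev() on failure; struct mydrv_priv is hypothetical:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */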
9023
9024int netdev_refcnt_read(const struct net_device *dev)
9025{
9026 int i, refcnt = 0;
9027
9028 for_each_possible_cpu(i)
9029 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
9030 return refcnt;
9031}
9032EXPORT_SYMBOL(netdev_refcnt_read);
9033
2c53040f 9034/**
1da177e4 9035 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 9036 * @dev: target net_device
9037 *
9038 * This is called when unregistering network devices.
9039 *
9040 * Any protocol or device that holds a reference should register
9041 * for netdevice notification, and cleanup and put back the
9042 * reference if they receive an UNREGISTER event.
9043 * We can get stuck here if buggy protocols don't correctly
4ec93edb 9044 * call dev_put.
9045 */
9046static void netdev_wait_allrefs(struct net_device *dev)
9047{
9048 unsigned long rebroadcast_time, warning_time;
29b4433d 9049 int refcnt;
1da177e4 9050
9051 linkwatch_forget_dev(dev);
9052
1da177e4 9053 rebroadcast_time = warning_time = jiffies;
9054 refcnt = netdev_refcnt_read(dev);
9055
9056 while (refcnt != 0) {
1da177e4 9057 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 9058 rtnl_lock();
9059
9060 /* Rebroadcast unregister notification */
056925ab 9061 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 9062
748e2d93 9063 __rtnl_unlock();
0115e8e3 9064 rcu_barrier();
9065 rtnl_lock();
9066
9067 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
9068 &dev->state)) {
9069 /* We must not have linkwatch events
9070 * pending on unregister. If this
9071 * happens, we simply run the queue
9072 * unscheduled, resulting in a noop
9073 * for this device.
9074 */
9075 linkwatch_run_queue();
9076 }
9077
6756ae4b 9078 __rtnl_unlock();
9079
9080 rebroadcast_time = jiffies;
9081 }
9082
9083 msleep(250);
9084
9085 refcnt = netdev_refcnt_read(dev);
9086
d7c04b05 9087 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
9088 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
9089 dev->name, refcnt);
9090 warning_time = jiffies;
9091 }
9092 }
9093}
9094
9095/* The sequence is:
9096 *
9097 * rtnl_lock();
9098 * ...
9099 * register_netdevice(x1);
9100 * register_netdevice(x2);
9101 * ...
9102 * unregister_netdevice(y1);
9103 * unregister_netdevice(y2);
9104 * ...
9105 * rtnl_unlock();
9106 * free_netdev(y1);
9107 * free_netdev(y2);
9108 *
58ec3b4d 9109 * We are invoked by rtnl_unlock().
1da177e4 9110 * This allows us to deal with problems:
b17a7c17 9111 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
9112 * without deadlocking with linkwatch via keventd.
9113 * 2) Since we run with the RTNL semaphore not held, we can sleep
9114 * safely in order to wait for the netdev refcnt to drop to zero.
9115 *
9116 * We must not return until all unregister events added during
9117 * the interval the lock was held have been completed.
1da177e4 9118 */
9119void netdev_run_todo(void)
9120{
626ab0e6 9121 struct list_head list;
1da177e4 9122
1da177e4 9123 /* Snapshot list, allow later requests */
626ab0e6 9124 list_replace_init(&net_todo_list, &list);
9125
9126 __rtnl_unlock();
626ab0e6 9127
9128
9129 /* Wait for rcu callbacks to finish before next phase */
9130 if (!list_empty(&list))
9131 rcu_barrier();
9132
9133 while (!list_empty(&list)) {
9134 struct net_device *dev
e5e26d75 9135 = list_first_entry(&list, struct net_device, todo_list);
9136 list_del(&dev->todo_list);
9137
b17a7c17 9138 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 9139 pr_err("network todo '%s' but state %d\n",
9140 dev->name, dev->reg_state);
9141 dump_stack();
9142 continue;
9143 }
1da177e4 9144
b17a7c17 9145 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 9146
b17a7c17 9147 netdev_wait_allrefs(dev);
1da177e4 9148
b17a7c17 9149 /* paranoia */
29b4433d 9150 BUG_ON(netdev_refcnt_read(dev));
9151 BUG_ON(!list_empty(&dev->ptype_all));
9152 BUG_ON(!list_empty(&dev->ptype_specific));
9153 WARN_ON(rcu_access_pointer(dev->ip_ptr));
9154 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
330c7272 9155#if IS_ENABLED(CONFIG_DECNET)
547b792c 9156 WARN_ON(dev->dn_ptr);
330c7272 9157#endif
9158 if (dev->priv_destructor)
9159 dev->priv_destructor(dev);
9160 if (dev->needs_free_netdev)
9161 free_netdev(dev);
9093bbb2 9162
9163 /* Report a network device has been unregistered */
9164 rtnl_lock();
9165 dev_net(dev)->dev_unreg_count--;
9166 __rtnl_unlock();
9167 wake_up(&netdev_unregistering_wq);
9168
9169 /* Free network device */
9170 kobject_put(&dev->dev.kobj);
1da177e4 9171 }
9172}
9173
9174/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
9175 * all the same fields in the same order as net_device_stats, with only
9176 * the type differing, but rtnl_link_stats64 may have additional fields
9177 * at the end for newer counters.
3cfde79c 9178 */
9179void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9180 const struct net_device_stats *netdev_stats)
9181{
9182#if BITS_PER_LONG == 64
9256645a 9183 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9af9959e 9184 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9185 /* zero out counters that only exist in rtnl_link_stats64 */
9186 memset((char *)stats64 + sizeof(*netdev_stats), 0,
9187 sizeof(*stats64) - sizeof(*netdev_stats));
3cfde79c 9188#else
9256645a 9189 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
9190 const unsigned long *src = (const unsigned long *)netdev_stats;
9191 u64 *dst = (u64 *)stats64;
9192
9256645a 9193 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
9194 for (i = 0; i < n; i++)
9195 dst[i] = src[i];
9196 /* zero out counters that only exist in rtnl_link_stats64 */
9197 memset((char *)stats64 + n * sizeof(u64), 0,
9198 sizeof(*stats64) - n * sizeof(u64));
9199#endif
9200}
77a1abf5 9201EXPORT_SYMBOL(netdev_stats_to_stats64);
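/* Example (illustrative sketch, not part of this file): a driver that only
 * maintains the legacy dev->stats counters can still implement
 * ndo_get_stats64() via this helper; mydrv_get_stats64() is hypothetical:
 *
 *	static void mydrv_get_stats64(struct net_device *dev,
 *				      struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *	}
 */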
3cfde79c 9202
9203/**
9204 * dev_get_stats - get network device statistics
9205 * @dev: device to get statistics from
28172739 9206 * @storage: place to store stats
eeda3fd6 9207 *
9208 * Get network statistics from device. Return @storage.
9209 * The device driver may provide its own method by setting
9210 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
9211 * otherwise the internal statistics structure is used.
eeda3fd6 9212 */
9213struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9214 struct rtnl_link_stats64 *storage)
7004bf25 9215{
9216 const struct net_device_ops *ops = dev->netdev_ops;
9217
9218 if (ops->ndo_get_stats64) {
9219 memset(storage, 0, sizeof(*storage));
9220 ops->ndo_get_stats64(dev, storage);
9221 } else if (ops->ndo_get_stats) {
3cfde79c 9222 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
9223 } else {
9224 netdev_stats_to_stats64(storage, &dev->stats);
28172739 9225 }
9226 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
9227 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
9228 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
28172739 9229 return storage;
c45d286e 9230}
eeda3fd6 9231EXPORT_SYMBOL(dev_get_stats);
c45d286e 9232
24824a09 9233struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 9234{
24824a09 9235 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 9236
9237#ifdef CONFIG_NET_CLS_ACT
9238 if (queue)
9239 return queue;
9240 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
9241 if (!queue)
9242 return NULL;
9243 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 9244 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
9245 queue->qdisc_sleeping = &noop_qdisc;
9246 rcu_assign_pointer(dev->ingress_queue, queue);
9247#endif
9248 return queue;
9249}
9250
9251static const struct ethtool_ops default_ethtool_ops;
9252
9253void netdev_set_default_ethtool_ops(struct net_device *dev,
9254 const struct ethtool_ops *ops)
9255{
9256 if (dev->ethtool_ops == &default_ethtool_ops)
9257 dev->ethtool_ops = ops;
9258}
9259EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
9260
9261void netdev_freemem(struct net_device *dev)
9262{
9263 char *addr = (char *)dev - dev->padded;
9264
4cb28970 9265 kvfree(addr);
9266}
9267
1da177e4 9268/**
722c9a0c 9269 * alloc_netdev_mqs - allocate network device
9270 * @sizeof_priv: size of private data to allocate space for
9271 * @name: device name format string
9272 * @name_assign_type: origin of device name
9273 * @setup: callback to initialize device
9274 * @txqs: the number of TX subqueues to allocate
9275 * @rxqs: the number of RX subqueues to allocate
9276 *
9277 * Allocates a struct net_device with private data area for driver use
9278 * and performs basic initialization. Also allocates subqueue structs
9279 * for each queue on the device.
1da177e4 9280 */
36909ea4 9281struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 9282 unsigned char name_assign_type,
9283 void (*setup)(struct net_device *),
9284 unsigned int txqs, unsigned int rxqs)
1da177e4 9285{
1da177e4 9286 struct net_device *dev;
52a59bd5 9287 unsigned int alloc_size;
1ce8e7b5 9288 struct net_device *p;
1da177e4 9289
9290 BUG_ON(strlen(name) >= sizeof(dev->name));
9291
36909ea4 9292 if (txqs < 1) {
7b6cd1ce 9293 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
9294 return NULL;
9295 }
9296
36909ea4 9297 if (rxqs < 1) {
7b6cd1ce 9298 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
9299 return NULL;
9300 }
36909ea4 9301
fd2ea0a7 9302 alloc_size = sizeof(struct net_device);
9303 if (sizeof_priv) {
9304 /* ensure 32-byte alignment of private area */
1ce8e7b5 9305 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
9306 alloc_size += sizeof_priv;
9307 }
9308 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 9309 alloc_size += NETDEV_ALIGN - 1;
1da177e4 9310
dcda9b04 9311 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
62b5942a 9312 if (!p)
1da177e4 9313 return NULL;
1da177e4 9314
1ce8e7b5 9315 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 9316 dev->padded = (char *)dev - (char *)p;
ab9c73cc 9317
9318 dev->pcpu_refcnt = alloc_percpu(int);
9319 if (!dev->pcpu_refcnt)
74d332c1 9320 goto free_dev;
ab9c73cc 9321
ab9c73cc 9322 if (dev_addr_init(dev))
29b4433d 9323 goto free_pcpu;
ab9c73cc 9324
22bedad3 9325 dev_mc_init(dev);
a748ee24 9326 dev_uc_init(dev);
ccffad25 9327
c346dca1 9328 dev_net_set(dev, &init_net);
1da177e4 9329
8d3bdbd5 9330 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 9331 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 9332
9333 INIT_LIST_HEAD(&dev->napi_list);
9334 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 9335 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 9336 INIT_LIST_HEAD(&dev->link_watch_list);
9337 INIT_LIST_HEAD(&dev->adj_list.upper);
9338 INIT_LIST_HEAD(&dev->adj_list.lower);
9339 INIT_LIST_HEAD(&dev->ptype_all);
9340 INIT_LIST_HEAD(&dev->ptype_specific);
9341#ifdef CONFIG_NET_SCHED
9342 hash_init(dev->qdisc_hash);
9343#endif
02875878 9344 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
9345 setup(dev);
9346
a813104d 9347 if (!dev->tx_queue_len) {
f84bb1ea 9348 dev->priv_flags |= IFF_NO_QUEUE;
11597084 9349 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
a813104d 9350 }
906470c1 9351
9352 dev->num_tx_queues = txqs;
9353 dev->real_num_tx_queues = txqs;
ed9af2e8 9354 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 9355 goto free_all;
e8a0464c 9356
9357 dev->num_rx_queues = rxqs;
9358 dev->real_num_rx_queues = rxqs;
fe822240 9359 if (netif_alloc_rx_queues(dev))
8d3bdbd5 9360 goto free_all;
0a9627f2 9361
1da177e4 9362 strcpy(dev->name, name);
c835a677 9363 dev->name_assign_type = name_assign_type;
cbda10fa 9364 dev->group = INIT_NETDEV_GROUP;
9365 if (!dev->ethtool_ops)
9366 dev->ethtool_ops = &default_ethtool_ops;
9367
9368 nf_hook_ingress_init(dev);
9369
1da177e4 9370 return dev;
ab9c73cc 9371
9372free_all:
9373 free_netdev(dev);
9374 return NULL;
9375
9376free_pcpu:
9377 free_percpu(dev->pcpu_refcnt);
9378free_dev:
9379 netdev_freemem(dev);
ab9c73cc 9380 return NULL;
1da177e4 9381}
36909ea4 9382EXPORT_SYMBOL(alloc_netdev_mqs);
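/* Example (illustrative sketch, not part of this file): allocating an
 * Ethernet-style device with eight TX and eight RX queues; ether_setup()
 * is the standard setup callback, "eth%d" lets the core pick the unit
 * number, and struct mydrv_priv is hypothetical:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct mydrv_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */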
9383
9384/**
9385 * free_netdev - free network device
9386 * @dev: device
9387 *
9388 * This function does the last stage of destroying an allocated device
9389 * interface. The reference to the device object is released. If this
9390 * is the last reference then it will be freed. Must be called in
9391 * process context.
9392 */
9393void free_netdev(struct net_device *dev)
9394{
9395 struct napi_struct *p, *n;
9396
93d05d4a 9397 might_sleep();
60877a32 9398 netif_free_tx_queues(dev);
e817f856 9399 netif_free_rx_queues(dev);
e8a0464c 9400
33d480ce 9401 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 9402
9403 /* Flush device addresses */
9404 dev_addr_flush(dev);
9405
9406 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
9407 netif_napi_del(p);
9408
9409 free_percpu(dev->pcpu_refcnt);
9410 dev->pcpu_refcnt = NULL;
9411
3041a069 9412 /* Compatibility with error handling in drivers */
1da177e4 9413 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 9414 netdev_freemem(dev);
9415 return;
9416 }
9417
9418 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
9419 dev->reg_state = NETREG_RELEASED;
9420
9421 /* will free via device release */
9422 put_device(&dev->dev);
1da177e4 9423}
d1b19dff 9424EXPORT_SYMBOL(free_netdev);
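/* Example (illustrative sketch, not part of this file): teardown of a
 * registered device is unregister first, then free, from process context,
 * e.g. in a driver's remove handler:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */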
4ec93edb 9425
9426/**
9427 * synchronize_net - Synchronize with packet receive processing
9428 *
9429 * Wait for packets currently being received to be done.
9430 * Does not block later packets from starting.
9431 */
4ec93edb 9432void synchronize_net(void)
9433{
9434 might_sleep();
9435 if (rtnl_is_locked())
9436 synchronize_rcu_expedited();
9437 else
9438 synchronize_rcu();
1da177e4 9439}
d1b19dff 9440EXPORT_SYMBOL(synchronize_net);
9441
9442/**
44a0873d 9443 * unregister_netdevice_queue - remove device from the kernel
1da177e4 9444 * @dev: device
44a0873d 9445 * @head: list
6ebfbc06 9446 *
1da177e4 9447 * This function shuts down a device interface and removes it
d59b54b1 9448 * from the kernel tables.
44a0873d 9449 * If head not NULL, device is queued to be unregistered later.
9450 *
9451 * Callers must hold the rtnl semaphore. You may want
9452 * unregister_netdev() instead of this.
9453 */
9454
44a0873d 9455void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 9456{
9457 ASSERT_RTNL();
9458
44a0873d 9459 if (head) {
9fdce099 9460 list_move_tail(&dev->unreg_list, head);
9461 } else {
9462 rollback_registered(dev);
9463 /* Finish processing unregister after unlock */
9464 net_set_todo(dev);
9465 }
1da177e4 9466}
44a0873d 9467EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 9468
9469/**
9470 * unregister_netdevice_many - unregister many devices
9471 * @head: list of devices
9472 *
9473 * Note: As most callers use a stack-allocated list_head,
9474 * we force a list_del() to make sure the stack won't be corrupted later.
9475 */
9476void unregister_netdevice_many(struct list_head *head)
9477{
9478 struct net_device *dev;
9479
9480 if (!list_empty(head)) {
9481 rollback_registered_many(head);
9482 list_for_each_entry(dev, head, unreg_list)
9483 net_set_todo(dev);
87757a91 9484 list_del(head);
9485 }
9486}
63c8099d 9487EXPORT_SYMBOL(unregister_netdevice_many);
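/* Example (illustrative sketch, not part of this file): batching several
 * unregistrations amortises the notifier and RCU overhead; note the
 * stack-allocated list head, which is why the list_del() above matters:
 *
 *	LIST_HEAD(kill_list);
 *
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */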
9b5e383c 9488
9489/**
9490 * unregister_netdev - remove device from the kernel
9491 * @dev: device
9492 *
9493 * This function shuts down a device interface and removes it
d59b54b1 9494 * from the kernel tables.
9495 *
9496 * This is just a wrapper for unregister_netdevice that takes
9497 * the rtnl semaphore. In general you want to use this and not
9498 * unregister_netdevice.
9499 */
9500void unregister_netdev(struct net_device *dev)
9501{
9502 rtnl_lock();
9503 unregister_netdevice(dev);
9504 rtnl_unlock();
9505}
9506EXPORT_SYMBOL(unregister_netdev);
9507
9508/**
9509 * dev_change_net_namespace - move device to a different network namespace
9510 * @dev: device
9511 * @net: network namespace
9512 * @pat: if not NULL, name pattern to try if the current device name
9513 * is already taken in the destination network namespace.
9514 *
9515 * This function shuts down a device interface and moves it
9516 * to a new network namespace. On success 0 is returned; on
9517 * failure a negative errno code is returned.
9518 *
9519 * Callers must hold the rtnl semaphore.
9520 */
9521
9522int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
9523{
38e01b30 9524 int err, new_nsid, new_ifindex;
9525
9526 ASSERT_RTNL();
9527
9528 /* Don't allow namespace local devices to be moved. */
9529 err = -EINVAL;
9530 if (dev->features & NETIF_F_NETNS_LOCAL)
9531 goto out;
9532
9533	/* Ensure the device has been registered */
9534 if (dev->reg_state != NETREG_REGISTERED)
9535 goto out;
9536
9537	/* Get out if there is nothing to do */
9538 err = 0;
878628fb 9539 if (net_eq(dev_net(dev), net))
9540 goto out;
9541
9542 /* Pick the destination device name, and ensure
9543 * we can use it in the destination network namespace.
9544 */
9545 err = -EEXIST;
d9031024 9546 if (__dev_get_by_name(net, dev->name)) {
9547 /* We get here if we can't use the current device name */
9548 if (!pat)
9549 goto out;
9550 err = dev_get_valid_name(net, dev, pat);
9551 if (err < 0)
9552 goto out;
9553 }
9554
9555 /*
9556	 * And now a mini version of register_netdevice and unregister_netdevice.
9557 */
9558
9559 /* If device is running close it first. */
9b772652 9560 dev_close(dev);
9561
9562 /* And unlink it from device chain */
9563 unlist_netdevice(dev);
9564
9565 synchronize_net();
9566
9567 /* Shutdown queueing discipline. */
9568 dev_shutdown(dev);
9569
9570	/* Notify protocols that we are about to destroy
eb13da1a 9571 * this device. They should clean all the things.
9572 *
9573 * Note that dev->reg_state stays at NETREG_REGISTERED.
9574 * This is wanted because this way 8021q and macvlan know
9575 * the device is just moving and can keep their slaves up.
9576 */
ce286d32 9577 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43 9578 rcu_barrier();
38e01b30 9579
c36ac8e2 9580 new_nsid = peernet2id_alloc(dev_net(dev), net);
9581	/* If there is an ifindex conflict, assign a new one */
9582 if (__dev_get_by_index(net, dev->ifindex))
9583 new_ifindex = dev_new_index(net);
9584 else
9585 new_ifindex = dev->ifindex;
9586
9587 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
9588 new_ifindex);
9589
9590 /*
9591 * Flush the unicast and multicast chains
9592 */
a748ee24 9593 dev_uc_flush(dev);
22bedad3 9594 dev_mc_flush(dev);
ce286d32 9595
9596 /* Send a netdev-removed uevent to the old namespace */
9597 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 9598 netdev_adjacent_del_links(dev);
4e66ae2e 9599
ce286d32 9600 /* Actually switch the network namespace */
c346dca1 9601 dev_net_set(dev, net);
38e01b30 9602 dev->ifindex = new_ifindex;
ce286d32 9603
9604 /* Send a netdev-add uevent to the new namespace */
9605 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 9606 netdev_adjacent_add_links(dev);
4e66ae2e 9607
8b41d188 9608 /* Fixup kobjects */
a1b3f594 9609 err = device_rename(&dev->dev, dev->name);
8b41d188 9610 WARN_ON(err);
9611
9612 /* Add the device back in the hashes */
9613 list_netdevice(dev);
9614
9615	/* Notify protocols that a new device appeared. */
9616 call_netdevice_notifiers(NETDEV_REGISTER, dev);
9617
9618 /*
9619 * Prevent userspace races by waiting until the network
9620 * device is fully setup before sending notifications.
9621 */
7f294054 9622 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 9623
9624 synchronize_net();
9625 err = 0;
9626out:
9627 return err;
9628}
463d0183 9629EXPORT_SYMBOL_GPL(dev_change_net_namespace);
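/* Example (illustrative sketch, not part of this file): moving a device
 * into another namespace under RTNL, falling back to a "dev%d" pattern
 * if its current name is already taken there; "newnet" is hypothetical:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, newnet, "dev%d");
 *	rtnl_unlock();
 */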
ce286d32 9630
f0bf90de 9631static int dev_cpu_dead(unsigned int oldcpu)
9632{
9633 struct sk_buff **list_skb;
1da177e4 9634 struct sk_buff *skb;
f0bf90de 9635 unsigned int cpu;
97d8b6e3 9636 struct softnet_data *sd, *oldsd, *remsd = NULL;
1da177e4 9637
9638 local_irq_disable();
9639 cpu = smp_processor_id();
9640 sd = &per_cpu(softnet_data, cpu);
9641 oldsd = &per_cpu(softnet_data, oldcpu);
9642
9643 /* Find end of our completion_queue. */
9644 list_skb = &sd->completion_queue;
9645 while (*list_skb)
9646 list_skb = &(*list_skb)->next;
9647 /* Append completion queue from offline CPU. */
9648 *list_skb = oldsd->completion_queue;
9649 oldsd->completion_queue = NULL;
9650
1da177e4 9651 /* Append output queue from offline CPU. */
9652 if (oldsd->output_queue) {
9653 *sd->output_queue_tailp = oldsd->output_queue;
9654 sd->output_queue_tailp = oldsd->output_queue_tailp;
9655 oldsd->output_queue = NULL;
9656 oldsd->output_queue_tailp = &oldsd->output_queue;
9657 }
9658	/* Append NAPI poll list from offline CPU, with one exception:
9659 * process_backlog() must be called by cpu owning percpu backlog.
9660 * We properly handle process_queue & input_pkt_queue later.
9661 */
9662 while (!list_empty(&oldsd->poll_list)) {
9663 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9664 struct napi_struct,
9665 poll_list);
9666
9667 list_del_init(&napi->poll_list);
9668 if (napi->poll == process_backlog)
9669 napi->state = 0;
9670 else
9671 ____napi_schedule(sd, napi);
264524d5 9672 }
9673
9674 raise_softirq_irqoff(NET_TX_SOFTIRQ);
9675 local_irq_enable();
9676
773fc8f6 9677#ifdef CONFIG_RPS
9678 remsd = oldsd->rps_ipi_list;
9679 oldsd->rps_ipi_list = NULL;
9680#endif
9681 /* send out pending IPI's on offline CPU */
9682 net_rps_send_ipi(remsd);
9683
1da177e4 9684 /* Process offline CPU's input_pkt_queue */
76cc8b13 9685 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 9686 netif_rx_ni(skb);
76cc8b13 9687 input_queue_head_incr(oldsd);
fec5e652 9688 }
ac64da0b 9689 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 9690 netif_rx_ni(skb);
9691 input_queue_head_incr(oldsd);
9692 }
1da177e4 9693
f0bf90de 9694 return 0;
1da177e4 9695}
1da177e4 9696
7f353bf2 9697/**
9698 * netdev_increment_features - increment feature set by one
9699 * @all: current feature set
9700 * @one: new feature set
9701 * @mask: mask feature set
9702 *
9703 * Computes a new feature set after adding a device with feature set
9704 * @one to the master device with current feature set @all. Will not
9705 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 9706 */
9707netdev_features_t netdev_increment_features(netdev_features_t all,
9708 netdev_features_t one, netdev_features_t mask)
b63365a2 9709{
c8cd0989 9710 if (mask & NETIF_F_HW_CSUM)
a188222b 9711 mask |= NETIF_F_CSUM_MASK;
1742f183 9712 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 9713
a188222b 9714 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
1742f183 9715 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 9716
1742f183 9717 /* If one device supports hw checksumming, set for all. */
9718 if (all & NETIF_F_HW_CSUM)
9719 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
9720
9721 return all;
9722}
b63365a2 9723EXPORT_SYMBOL(netdev_increment_features);
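/* Example (illustrative sketch, not part of this file): a bonding-style
 * master could recompute its feature set by folding in each slave;
 * "slave", the "master_slaves" list head and "mask" are hypothetical:
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &master_slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */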
7f353bf2 9724
430f03cd 9725static struct hlist_head * __net_init netdev_create_hash(void)
9726{
9727 int i;
9728 struct hlist_head *hash;
9729
6da2ec56 9730 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
9731 if (hash != NULL)
9732 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9733 INIT_HLIST_HEAD(&hash[i]);
9734
9735 return hash;
9736}
9737
881d966b 9738/* Initialize per network namespace state */
4665079c 9739static int __net_init netdev_init(struct net *net)
881d966b 9740{
d9f37d01 9741 BUILD_BUG_ON(GRO_HASH_BUCKETS >
ccdb5171 9742 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
d9f37d01 9743
9744 if (net != &init_net)
9745 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 9746
9747 net->dev_name_head = netdev_create_hash();
9748 if (net->dev_name_head == NULL)
9749 goto err_name;
881d966b 9750
9751 net->dev_index_head = netdev_create_hash();
9752 if (net->dev_index_head == NULL)
9753 goto err_idx;
9754
9755 return 0;
9756
9757err_idx:
9758 kfree(net->dev_name_head);
9759err_name:
9760 return -ENOMEM;
9761}
9762
9763/**
9764 * netdev_drivername - network driver for the device
9765 * @dev: network device
9766 *
9767 * Determine network driver for device.
9768 */
3019de12 9769const char *netdev_drivername(const struct net_device *dev)
6579e57b 9770{
9771 const struct device_driver *driver;
9772 const struct device *parent;
3019de12 9773 const char *empty = "";
9774
9775 parent = dev->dev.parent;
6579e57b 9776 if (!parent)
3019de12 9777 return empty;
9778
9779 driver = parent->driver;
9780 if (driver && driver->name)
9781 return driver->name;
9782 return empty;
9783}
9784
9785static void __netdev_printk(const char *level, const struct net_device *dev,
9786 struct va_format *vaf)
256df2f3 9787{
b004ff49 9788 if (dev && dev->dev.parent) {
9789 dev_printk_emit(level[1] - '0',
9790 dev->dev.parent,
9791 "%s %s %s%s: %pV",
9792 dev_driver_string(dev->dev.parent),
9793 dev_name(dev->dev.parent),
9794 netdev_name(dev), netdev_reg_state(dev),
9795 vaf);
b004ff49 9796 } else if (dev) {
9797 printk("%s%s%s: %pV",
9798 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 9799 } else {
6ea754eb 9800 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 9801 }
9802}
9803
9804void netdev_printk(const char *level, const struct net_device *dev,
9805 const char *format, ...)
9806{
9807 struct va_format vaf;
9808 va_list args;
9809
9810 va_start(args, format);
9811
9812 vaf.fmt = format;
9813 vaf.va = &args;
9814
6ea754eb 9815 __netdev_printk(level, dev, &vaf);
b004ff49 9816
256df2f3 9817 va_end(args);
9818}
9819EXPORT_SYMBOL(netdev_printk);
9820
9821#define define_netdev_printk_level(func, level) \
6ea754eb 9822void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 9823{ \
9824 struct va_format vaf; \
9825 va_list args; \
9826 \
9827 va_start(args, fmt); \
9828 \
9829 vaf.fmt = fmt; \
9830 vaf.va = &args; \
9831 \
6ea754eb 9832 __netdev_printk(level, dev, &vaf); \
b004ff49 9833 \
256df2f3 9834 va_end(args); \
9835} \
9836EXPORT_SYMBOL(func);
9837
9838define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9839define_netdev_printk_level(netdev_alert, KERN_ALERT);
9840define_netdev_printk_level(netdev_crit, KERN_CRIT);
9841define_netdev_printk_level(netdev_err, KERN_ERR);
9842define_netdev_printk_level(netdev_warn, KERN_WARNING);
9843define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9844define_netdev_printk_level(netdev_info, KERN_INFO);
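/* Example (illustrative sketch, not part of this file): these helpers
 * prefix the message with the driver and device name, so drivers prefer
 * them over raw printk(); "status" and "speed" are hypothetical:
 *
 *	netdev_warn(dev, "link lost, resetting (status %#x)\n", status);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */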
9845
4665079c 9846static void __net_exit netdev_exit(struct net *net)
9847{
9848 kfree(net->dev_name_head);
9849 kfree(net->dev_index_head);
9850 if (net != &init_net)
9851 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
9852}
9853
022cbae6 9854static struct pernet_operations __net_initdata netdev_net_ops = {
9855 .init = netdev_init,
9856 .exit = netdev_exit,
9857};
9858
4665079c 9859static void __net_exit default_device_exit(struct net *net)
ce286d32 9860{
e008b5fc 9861 struct net_device *dev, *aux;
ce286d32 9862 /*
e008b5fc 9863 * Push all migratable network devices back to the
9864 * initial network namespace
9865 */
9866 rtnl_lock();
e008b5fc 9867 for_each_netdev_safe(net, dev, aux) {
ce286d32 9868 int err;
aca51397 9869 char fb_name[IFNAMSIZ];
9870
9871		/* Ignore unmovable devices (i.e. loopback) */
9872 if (dev->features & NETIF_F_NETNS_LOCAL)
9873 continue;
9874
9875 /* Leave virtual devices for the generic cleanup */
9876 if (dev->rtnl_link_ops)
9877 continue;
d0c082ce 9878
25985edc 9879 /* Push remaining network devices to init_net */
aca51397 9880 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9881 if (__dev_get_by_name(&init_net, fb_name))
9882 snprintf(fb_name, IFNAMSIZ, "dev%%d");
aca51397 9883 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 9884 if (err) {
9885 pr_emerg("%s: failed to move %s to init_net: %d\n",
9886 __func__, dev->name, err);
aca51397 9887 BUG();
9888 }
9889 }
9890 rtnl_unlock();
9891}
9892
9893static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9894{
9895 /* Return with the rtnl_lock held when there are no network
9896 * devices unregistering in any network namespace in net_list.
9897 */
9898 struct net *net;
9899 bool unregistering;
ff960a73 9900 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 9901
ff960a73 9902 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 9903 for (;;) {
9904 unregistering = false;
9905 rtnl_lock();
9906 list_for_each_entry(net, net_list, exit_list) {
9907 if (net->dev_unreg_count > 0) {
9908 unregistering = true;
9909 break;
9910 }
9911 }
9912 if (!unregistering)
9913 break;
9914 __rtnl_unlock();
9915
9916 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 9917 }
ff960a73 9918 remove_wait_queue(&netdev_unregistering_wq, &wait);
9919}
9920
9921static void __net_exit default_device_exit_batch(struct list_head *net_list)
9922{
9923	/* At exit, all network devices must be removed from a network
9924	 * namespace. Do this in the reverse order of registration.
9925 * Do this across as many network namespaces as possible to
9926 * improve batching efficiency.
9927 */
9928 struct net_device *dev;
9929 struct net *net;
9930 LIST_HEAD(dev_kill_list);
9931
9932	/* To prevent network device cleanup code from dereferencing
9933	 * loopback devices or network devices that have been freed,
9934	 * wait here for all pending unregistrations to complete
9935	 * before unregistering the loopback device and allowing the
9936	 * network namespace to be freed.
9937 *
9938 * The netdev todo list containing all network devices
9939 * unregistrations that happen in default_device_exit_batch
9940 * will run in the rtnl_unlock() at the end of
9941 * default_device_exit_batch.
9942 */
9943 rtnl_lock_unregistering(net_list);
9944 list_for_each_entry(net, net_list, exit_list) {
9945 for_each_netdev_reverse(net, dev) {
b0ab2fab 9946 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
9947 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9948 else
9949 unregister_netdevice_queue(dev, &dev_kill_list);
9950 }
9951 }
9952 unregister_netdevice_many(&dev_kill_list);
9953 rtnl_unlock();
9954}
9955
022cbae6 9956static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 9957 .exit = default_device_exit,
04dc7f6b 9958 .exit_batch = default_device_exit_batch,
9959};
9960
9961/*
9962 * Initialize the DEV module. At boot time this walks the device list and
9963 * unhooks any devices that fail to initialise (normally hardware not
9964 * present) and leaves us with a valid list of present and active devices.
9965 *
9966 */
9967
9968/*
9969 * This is called single threaded during boot, so no need
9970 * to take the rtnl semaphore.
9971 */
9972static int __init net_dev_init(void)
9973{
9974 int i, rc = -ENOMEM;
9975
9976 BUG_ON(!dev_boot_phase);
9977
1da177e4
LT
9978 if (dev_proc_init())
9979 goto out;
9980
8b41d188 9981 if (netdev_kobject_init())
1da177e4
LT
9982 goto out;
9983
9984 INIT_LIST_HEAD(&ptype_all);
82d8a867 9985 for (i = 0; i < PTYPE_HASH_SIZE; i++)
9986 INIT_LIST_HEAD(&ptype_base[i]);
9987
9988 INIT_LIST_HEAD(&offload_base);
9989
9990 if (register_pernet_subsys(&netdev_net_ops))
9991 goto out;
9992
9993 /*
9994 * Initialise the packet receive queues.
9995 */
9996
6f912042 9997 for_each_possible_cpu(i) {
41852497 9998 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
e36fa2f7 9999 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 10000
10001 INIT_WORK(flush, flush_backlog);
10002
e36fa2f7 10003 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 10004 skb_queue_head_init(&sd->process_queue);
10005#ifdef CONFIG_XFRM_OFFLOAD
10006 skb_queue_head_init(&sd->xfrm_backlog);
10007#endif
e36fa2f7 10008 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 10009 sd->output_queue_tailp = &sd->output_queue;
df334545 10010#ifdef CONFIG_RPS
10011 sd->csd.func = rps_trigger_softirq;
10012 sd->csd.info = sd;
e36fa2f7 10013 sd->cpu = i;
1e94d72f 10014#endif
0a9627f2 10015
7c4ec749 10016 init_gro_hash(&sd->backlog);
10017 sd->backlog.poll = process_backlog;
10018 sd->backlog.weight = weight_p;
10019 }
10020
10021 dev_boot_phase = 0;
10022
10023	/* The loopback device is special: if any other network device
10024	 * is present in a network namespace, the loopback device must
10025	 * be present. Since we now dynamically allocate and free the
10026	 * loopback device, ensure this invariant is maintained by
10027	 * keeping the loopback device as the first device on the
10028	 * list of network devices. It is thus the first device that
10029	 * appears and the last network device that disappears.
10031 */
10032 if (register_pernet_device(&loopback_net_ops))
10033 goto out;
10034
10035 if (register_pernet_device(&default_device_ops))
10036 goto out;
10037
10038 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
10039 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4 10040
10041 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
10042 NULL, dev_cpu_dead);
10043 WARN_ON(rc < 0);
10044 rc = 0;
10045out:
10046 return rc;
10047}
10048
10049subsys_initcall(net_dev_init);