/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

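/* A device is "alive" from allocation until unregistration begins:
 * reg_state is then either NETREG_UNINITIALIZED or NETREG_REGISTERED.
 */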
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

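/* For instance, NETDEVICE_SHOW_RO(dev_id, fmt_hex) below generates
 * format_dev_id(), dev_id_show() and dev_attr_dev_id, which back the
 * read-only /sys/class/net/<iface>/dev_id file.
 */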
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

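/* Human-readable names for the RFC 2863 operational states, indexed by
 * the IF_OPER_* value held in netdev->operstate.
 */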
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

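/* A write such as "echo 1400 > /sys/class/net/eth0/mtu" ("eth0" being an
 * example name) travels mtu_store() -> netdev_store() -> change_mtu().
 */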
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res) {
			netdev_err(dev,
				   "refused to change device tx_queue_len\n");
			dev->tx_queue_len = orig_len;
			return -EFAULT;
		}
	}

	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct switchdev_attr attr = {
			.orig_dev = netdev,
			.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
			.flags = SWITCHDEV_F_NO_RECURSE,
		};

		ret = switchdev_port_attr_get(netdev, &attr);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
				      attr.u.ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

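/* Per-device attributes exposed under /sys/class/net/<iface>/ */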
static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

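/* rps_cpus reports the CPUs that RPS may steer this rx queue's packets
 * to, formatted as a hexadecimal CPU bitmap.
 */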
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

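/* Parses a hexadecimal CPU bitmap, e.g. writing "f" to
 * /sys/class/net/<iface>/queues/rx-0/rps_cpus lets RPS steer this
 * queue's packets to CPUs 0-3; writing "0" turns RPS off for the queue.
 */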
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

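/* The count written to rps_flow_cnt is rounded up to the next power of
 * two before the per-queue flow table is reallocated; writing 0 frees it.
 */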
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!atomic_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int index = get_netdev_queue_index(queue);
	int tc = netdev_txq_to_tc(dev, index);

	if (tc < 0)
		return -EINVAL;

	return sprintf(buf, "%u\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

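/* Byte queue limits: each tx queue exposes a byte_queue_limits/ group
 * (limit, limit_max, limit_min, hold_time, inflight); writing "max" to
 * a limit selects DQL_MAX_LIMIT.
 */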
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, S_IRUGO | S_IWUSR,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, S_IRUGO | S_IWUSR,				\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

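/* xps_cpus is the reverse view of dev->xps_maps: report every CPU whose
 * XPS map contains this tx queue's index.
 */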
#ifdef CONFIG_XPS
static ssize_t xps_cpus_show(struct netdev_queue *queue,
			     char *buf)
{
	struct net_device *dev = queue->dev;
	int cpu, len, num_tc = 1, tc = 0;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;

	index = get_netdev_queue_index(queue);

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			int i, tci = cpu * num_tc + tc;
			struct xps_map *map;

			map = rcu_dereference(dev_maps->cpu_map[tci]);
			if (!map)
				continue;

			for (i = map->len; i--;) {
				if (map->queues[i] == index) {
					cpumask_set_cpu(cpu, mask);
					break;
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto err;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!atomic_read(&dev_net(dev)->count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

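/* Create the per-device "queues" kset plus one kobject per real rx/tx
 * queue; on failure, unwind whatever was already registered.
 */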
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

1587
aa836df9
FF
1588#ifdef CONFIG_OF_NET
1589static int of_dev_node_match(struct device *dev, const void *data)
1590{
1591 int ret = 0;
1592
1593 if (dev->parent)
1594 ret = dev->parent->of_node == data;
1595
1596 return ret == 0 ? dev->of_node == data : ret;
1597}
1598
/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!atomic_read(&dev_net(ndev)->count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

1639
1640/* Create sysfs entries for network device. */
6b53dafe 1641int netdev_register_kobject(struct net_device *ndev)
1da177e4 1642{
6648c65e 1643 struct device *dev = &ndev->dev;
6b53dafe 1644 const struct attribute_group **groups = ndev->sysfs_groups;
0a9627f2 1645 int error = 0;
1da177e4 1646
a1b3f594 1647 device_initialize(dev);
43cb76d9 1648 dev->class = &net_class;
6b53dafe 1649 dev->platform_data = ndev;
43cb76d9 1650 dev->groups = groups;
1da177e4 1651
6b53dafe 1652 dev_set_name(dev, "%s", ndev->name);
1da177e4 1653
8b41d188 1654#ifdef CONFIG_SYSFS
0c509a6c
EB
1655 /* Allow for a device specific group */
1656 if (*groups)
1657 groups++;
1da177e4 1658
0c509a6c 1659 *groups++ = &netstat_group;
38c1a01c
JB
1660
1661#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
6b53dafe 1662 if (ndev->ieee80211_ptr)
38c1a01c
JB
1663 *groups++ = &wireless_group;
1664#if IS_ENABLED(CONFIG_WIRELESS_EXT)
6b53dafe 1665 else if (ndev->wireless_handlers)
38c1a01c
JB
1666 *groups++ = &wireless_group;
1667#endif
1668#endif
8b41d188 1669#endif /* CONFIG_SYSFS */
1da177e4 1670
0a9627f2
TH
1671 error = device_add(dev);
1672 if (error)
1673 return error;
1674
6b53dafe 1675 error = register_queue_kobjects(ndev);
0a9627f2
TH
1676 if (error) {
1677 device_del(dev);
1678 return error;
1679 }
1680
9802c8e2
ML
1681 pm_runtime_set_memalloc_noio(dev, true);
1682
0a9627f2 1683 return error;
1da177e4
LT
1684}
1685
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}