]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - net/core/gen_stats.c
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152
[mirror_ubuntu-kernels.git] / net / core / gen_stats.c
CommitLineData
2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4
LT
2/*
3 * net/core/gen_stats.c
4 *
1da177e4
LT
5 * Authors: Thomas Graf <tgraf@suug.ch>
6 * Jamal Hadi Salim
7 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
8 *
9 * See Documentation/networking/gen_stats.txt
10 */
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/socket.h>
17#include <linux/rtnetlink.h>
18#include <linux/gen_stats.h>
1e90474c 19#include <net/netlink.h>
1da177e4
LT
20#include <net/gen_stats.h>
21
22
/* Append one TLV to the dump's skb via nla_put_64bit(), using @padattr
 * for 64-bit alignment padding.  On failure the statistics lock (if any)
 * is released and any pending compat xstats copy is discarded, so callers
 * may simply propagate the -1 without further cleanup.
 */
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	/* Error unwind: drop the lock taken in gnet_stats_start_copy_compat()
	 * and free the xstats copy queued by gnet_stats_copy_app() so nothing
	 * leaks on a truncated dump.
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}
38
/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	/* The lock is optional; when given it is held (BH disabled) until
	 * gnet_stats_finish_copy() or a failing gnet_stats_copy() drops it.
	 */
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
1da177e4
LT
94
/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	/* Zero compat TLV types: plain (non-compatibility-mode) dump. */
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
1da177e4 116
22e0f8b9
JF
/* Sum the per-cpu basic counters into @bstats.  Each CPU's bytes/packets
 * pair is read under its u64_stats syncp so the two values are consistent
 * with each other even on 32-bit hosts.
 */
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* Retry until a consistent snapshot of this CPU is read. */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}
139
140void
edb09eb1
ED
141__gnet_stats_copy_basic(const seqcount_t *running,
142 struct gnet_stats_basic_packed *bstats,
22e0f8b9
JF
143 struct gnet_stats_basic_cpu __percpu *cpu,
144 struct gnet_stats_basic_packed *b)
145{
edb09eb1
ED
146 unsigned int seq;
147
22e0f8b9
JF
148 if (cpu) {
149 __gnet_stats_copy_basic_cpu(bstats, cpu);
edb09eb1
ED
150 return;
151 }
152 do {
153 if (running)
154 seq = read_seqcount_begin(running);
22e0f8b9
JF
155 bstats->bytes = b->bytes;
156 bstats->packets = b->packets;
edb09eb1 157 } while (running && read_seqcount_retry(running, seq));
22e0f8b9
JF
158}
159EXPORT_SYMBOL(__gnet_stats_copy_basic);
160
/* Common worker for gnet_stats_copy_basic{,_hw}(): snapshot the counters,
 * mirror them into the legacy struct tc_stats when requested, and emit
 * them as TLV @type.
 */
static int
___gnet_stats_copy_basic(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b,
			 int type)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	/* Only the regular (non-hw) counters feed the compat tc_stats. */
	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;

		/* Zeroed first: the whole struct, padding included, is
		 * copied into the skb and thus out to userspace.
		 */
		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		return gnet_stats_copy(d, type, &sb, sizeof(sb),
				       TCA_STATS_PAD);
	}
	return 0;
}
5e111210
EC
188
189/**
190 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
191 * @running: seqcount_t pointer
192 * @d: dumping handle
193 * @cpu: copy statistic per cpu
194 * @b: basic statistics
195 *
196 * Appends the basic statistics to the top level TLV created by
197 * gnet_stats_start_copy().
198 *
199 * Returns 0 on success or -1 with the statistic lock released
200 * if the room in the socket buffer was not sufficient.
201 */
202int
203gnet_stats_copy_basic(const seqcount_t *running,
204 struct gnet_dump *d,
205 struct gnet_stats_basic_cpu __percpu *cpu,
206 struct gnet_stats_basic_packed *b)
207{
208 return ___gnet_stats_copy_basic(running, d, cpu, b,
209 TCA_STATS_BASIC);
210}
9e34a5b5 211EXPORT_SYMBOL(gnet_stats_copy_basic);
1da177e4 212
5e111210
EC
213/**
214 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
215 * @running: seqcount_t pointer
216 * @d: dumping handle
217 * @cpu: copy statistic per cpu
218 * @b: basic statistics
219 *
220 * Appends the basic statistics to the top level TLV created by
221 * gnet_stats_start_copy().
222 *
223 * Returns 0 on success or -1 with the statistic lock released
224 * if the room in the socket buffer was not sufficient.
225 */
226int
227gnet_stats_copy_basic_hw(const seqcount_t *running,
228 struct gnet_dump *d,
229 struct gnet_stats_basic_cpu __percpu *cpu,
230 struct gnet_stats_basic_packed *b)
231{
232 return ___gnet_stats_copy_basic(running, d, cpu, b,
233 TCA_STATS_BASIC_HW);
234}
235EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
236
1da177e4
LT
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	/* No estimator attached: nothing to report, not an error. */
	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	/* Legacy 32-bit TLV: clamp bps, which can genuinely exceed 2^32. */
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		/* If bps was not clamped the 32-bit TLV is exact: done. */
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
1da177e4 280
b0ab6f92
JF
281static void
282__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
283 const struct gnet_stats_queue __percpu *q)
284{
285 int i;
286
287 for_each_possible_cpu(i) {
288 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
289
73eb628d 290 qstats->qlen = 0;
b0ab6f92
JF
291 qstats->backlog += qcpu->backlog;
292 qstats->drops += qcpu->drops;
293 qstats->requeues += qcpu->requeues;
294 qstats->overlimits += qcpu->overlimits;
295 }
296}
297
b01ac095
JF
298void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
299 const struct gnet_stats_queue __percpu *cpu,
300 const struct gnet_stats_queue *q,
301 __u32 qlen)
b0ab6f92
JF
302{
303 if (cpu) {
304 __gnet_stats_copy_queue_cpu(qstats, cpu);
305 } else {
73eb628d 306 qstats->qlen = q->qlen;
b0ab6f92
JF
307 qstats->backlog = q->backlog;
308 qstats->drops = q->drops;
309 qstats->requeues = q->requeues;
310 qstats->overlimits = q->overlimits;
311 }
312
313 qstats->qlen = qlen;
314}
b01ac095 315EXPORT_SYMBOL(__gnet_stats_copy_queue);
b0ab6f92 316
1da177e4
LT
/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Using per cpu queue statistics if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	/* Mirror into the legacy struct tc_stats for compat-mode dumps. */
	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
1da177e4
LT
355
/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		/* Keep a private copy: @st may no longer be valid when
		 * gnet_stats_finish_copy() emits the compat xstats TLV.
		 */
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	/* Match gnet_stats_copy()'s failure contract (lock dropped) so the
	 * caller only needs to propagate the error.  d->xstats is NULL here,
	 * so there is nothing to free.
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
1da177e4
LT
392
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Patch the container TLV's length now that all nested TLVs are in. */
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	/* Compat TLVs are appended after the container was closed.  On
	 * failure gnet_stats_copy() has already dropped the lock and freed
	 * d->xstats, so a bare -1 return is safe here.
	 */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);