]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * net/core/gen_stats.c | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | * | |
9 | * Authors: Thomas Graf <tgraf@suug.ch> | |
10 | * Jamal Hadi Salim | |
11 | * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
12 | * | |
13 | * See Documentation/networking/gen_stats.txt | |
14 | */ | |
15 | ||
16 | #include <linux/types.h> | |
17 | #include <linux/kernel.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/socket.h> | |
21 | #include <linux/rtnetlink.h> | |
22 | #include <linux/gen_stats.h> | |
23 | #include <net/netlink.h> | |
24 | #include <net/gen_stats.h> | |
25 | ||
26 | ||
27 | static inline int | |
28 | gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr) | |
29 | { | |
30 | if (nla_put_64bit(d->skb, type, size, buf, padattr)) | |
31 | goto nla_put_failure; | |
32 | return 0; | |
33 | ||
34 | nla_put_failure: | |
35 | if (d->lock) | |
36 | spin_unlock_bh(d->lock); | |
37 | kfree(d->xstats); | |
38 | d->xstats = NULL; | |
39 | d->xstats_len = 0; | |
40 | return -1; | |
41 | } | |
42 | ||
43 | /** | |
44 | * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode | |
45 | * @skb: socket buffer to put statistics TLVs into | |
46 | * @type: TLV type for top level statistic TLV | |
47 | * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV | |
48 | * @xstats_type: TLV type for backward compatibility xstats TLV | |
49 | * @lock: statistics lock | |
50 | * @d: dumping handle | |
51 | * @padattr: padding attribute | |
52 | * | |
53 | * Initializes the dumping handle, grabs the statistic lock and appends | |
54 | * an empty TLV header to the socket buffer for use a container for all | |
55 | * other statistic TLVS. | |
56 | * | |
57 | * The dumping handle is marked to be in backward compatibility mode telling | |
58 | * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats. | |
59 | * | |
60 | * Returns 0 on success or -1 if the room in the socket buffer was not sufficient. | |
61 | */ | |
62 | int | |
63 | gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, | |
64 | int xstats_type, spinlock_t *lock, | |
65 | struct gnet_dump *d, int padattr) | |
66 | __acquires(lock) | |
67 | { | |
68 | memset(d, 0, sizeof(*d)); | |
69 | ||
70 | if (type) | |
71 | d->tail = (struct nlattr *)skb_tail_pointer(skb); | |
72 | d->skb = skb; | |
73 | d->compat_tc_stats = tc_stats_type; | |
74 | d->compat_xstats = xstats_type; | |
75 | d->padattr = padattr; | |
76 | if (lock) { | |
77 | d->lock = lock; | |
78 | spin_lock_bh(lock); | |
79 | } | |
80 | if (d->tail) | |
81 | return gnet_stats_copy(d, type, NULL, 0, padattr); | |
82 | ||
83 | return 0; | |
84 | } | |
85 | EXPORT_SYMBOL(gnet_stats_start_copy_compat); | |
86 | ||
/**
 * gnet_stats_start_copy - start dumping procedure in normal (non-compat) mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not
 * sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	/* Passing 0 for both compat TLV types leaves compatibility mode off. */
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
108 | ||
109 | static void | |
110 | __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, | |
111 | struct gnet_stats_basic_cpu __percpu *cpu) | |
112 | { | |
113 | int i; | |
114 | ||
115 | for_each_possible_cpu(i) { | |
116 | struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); | |
117 | unsigned int start; | |
118 | u64 bytes; | |
119 | u32 packets; | |
120 | ||
121 | do { | |
122 | start = u64_stats_fetch_begin_irq(&bcpu->syncp); | |
123 | bytes = bcpu->bstats.bytes; | |
124 | packets = bcpu->bstats.packets; | |
125 | } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); | |
126 | ||
127 | bstats->bytes += bytes; | |
128 | bstats->packets += packets; | |
129 | } | |
130 | } | |
131 | ||
132 | void | |
133 | __gnet_stats_copy_basic(const seqcount_t *running, | |
134 | struct gnet_stats_basic_packed *bstats, | |
135 | struct gnet_stats_basic_cpu __percpu *cpu, | |
136 | struct gnet_stats_basic_packed *b) | |
137 | { | |
138 | unsigned int seq; | |
139 | ||
140 | if (cpu) { | |
141 | __gnet_stats_copy_basic_cpu(bstats, cpu); | |
142 | return; | |
143 | } | |
144 | do { | |
145 | if (running) | |
146 | seq = read_seqcount_begin(running); | |
147 | bstats->bytes = b->bytes; | |
148 | bstats->packets = b->packets; | |
149 | } while (running && read_seqcount_retry(running, seq)); | |
150 | } | |
151 | EXPORT_SYMBOL(__gnet_stats_copy_basic); | |
152 | ||
153 | /** | |
154 | * gnet_stats_copy_basic - copy basic statistics into statistic TLV | |
155 | * @running: seqcount_t pointer | |
156 | * @d: dumping handle | |
157 | * @cpu: copy statistic per cpu | |
158 | * @b: basic statistics | |
159 | * | |
160 | * Appends the basic statistics to the top level TLV created by | |
161 | * gnet_stats_start_copy(). | |
162 | * | |
163 | * Returns 0 on success or -1 with the statistic lock released | |
164 | * if the room in the socket buffer was not sufficient. | |
165 | */ | |
166 | int | |
167 | gnet_stats_copy_basic(const seqcount_t *running, | |
168 | struct gnet_dump *d, | |
169 | struct gnet_stats_basic_cpu __percpu *cpu, | |
170 | struct gnet_stats_basic_packed *b) | |
171 | { | |
172 | struct gnet_stats_basic_packed bstats = {0}; | |
173 | ||
174 | __gnet_stats_copy_basic(running, &bstats, cpu, b); | |
175 | ||
176 | if (d->compat_tc_stats) { | |
177 | d->tc_stats.bytes = bstats.bytes; | |
178 | d->tc_stats.packets = bstats.packets; | |
179 | } | |
180 | ||
181 | if (d->tail) { | |
182 | struct gnet_stats_basic sb; | |
183 | ||
184 | memset(&sb, 0, sizeof(sb)); | |
185 | sb.bytes = bstats.bytes; | |
186 | sb.packets = bstats.packets; | |
187 | return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb), | |
188 | TCA_STATS_PAD); | |
189 | } | |
190 | return 0; | |
191 | } | |
192 | EXPORT_SYMBOL(gnet_stats_copy_basic); | |
193 | ||
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	/* No estimator attached (or it vanished): nothing to dump. */
	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	/* Legacy 32-bit TLV: clamp bps at UINT_MAX when the sample overflows. */
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		/* If the 32-bit bps was clamped, follow up with the full
		 * 64-bit sample so capable userspace sees the real rate.
		 */
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
237 | ||
238 | static void | |
239 | __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, | |
240 | const struct gnet_stats_queue __percpu *q) | |
241 | { | |
242 | int i; | |
243 | ||
244 | for_each_possible_cpu(i) { | |
245 | const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); | |
246 | ||
247 | qstats->qlen = 0; | |
248 | qstats->backlog += qcpu->backlog; | |
249 | qstats->drops += qcpu->drops; | |
250 | qstats->requeues += qcpu->requeues; | |
251 | qstats->overlimits += qcpu->overlimits; | |
252 | } | |
253 | } | |
254 | ||
255 | static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, | |
256 | const struct gnet_stats_queue __percpu *cpu, | |
257 | const struct gnet_stats_queue *q, | |
258 | __u32 qlen) | |
259 | { | |
260 | if (cpu) { | |
261 | __gnet_stats_copy_queue_cpu(qstats, cpu); | |
262 | } else { | |
263 | qstats->qlen = q->qlen; | |
264 | qstats->backlog = q->backlog; | |
265 | qstats->drops = q->drops; | |
266 | qstats->requeues = q->requeues; | |
267 | qstats->overlimits = q->overlimits; | |
268 | } | |
269 | ||
270 | qstats->qlen = qlen; | |
271 | } | |
272 | ||
273 | /** | |
274 | * gnet_stats_copy_queue - copy queue statistics into statistics TLV | |
275 | * @d: dumping handle | |
276 | * @cpu_q: per cpu queue statistics | |
277 | * @q: queue statistics | |
278 | * @qlen: queue length statistics | |
279 | * | |
280 | * Appends the queue statistics to the top level TLV created by | |
281 | * gnet_stats_start_copy(). Using per cpu queue statistics if | |
282 | * they are available. | |
283 | * | |
284 | * Returns 0 on success or -1 with the statistic lock released | |
285 | * if the room in the socket buffer was not sufficient. | |
286 | */ | |
287 | int | |
288 | gnet_stats_copy_queue(struct gnet_dump *d, | |
289 | struct gnet_stats_queue __percpu *cpu_q, | |
290 | struct gnet_stats_queue *q, __u32 qlen) | |
291 | { | |
292 | struct gnet_stats_queue qstats = {0}; | |
293 | ||
294 | __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen); | |
295 | ||
296 | if (d->compat_tc_stats) { | |
297 | d->tc_stats.drops = qstats.drops; | |
298 | d->tc_stats.qlen = qstats.qlen; | |
299 | d->tc_stats.backlog = qstats.backlog; | |
300 | d->tc_stats.overlimits = qstats.overlimits; | |
301 | } | |
302 | ||
303 | if (d->tail) | |
304 | return gnet_stats_copy(d, TCA_STATS_QUEUE, | |
305 | &qstats, sizeof(qstats), | |
306 | TCA_STATS_PAD); | |
307 | ||
308 | return 0; | |
309 | } | |
310 | EXPORT_SYMBOL(gnet_stats_copy_queue); | |
311 | ||
/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		/* Keep a private copy: the caller's buffer may be gone by
		 * the time gnet_stats_finish_copy() emits the compat TLV.
		 */
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	/* Match gnet_stats_copy()'s error contract: the statistics lock
	 * is released before returning -1.  d->xstats is NULL here, so
	 * there is nothing to free.
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
348 | ||
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Patch the container TLV's length now that all nested TLVs are in. */
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	/* Compat mode: emit the accumulated struct tc_stats.  On failure
	 * gnet_stats_copy() has already unlocked and freed d->xstats.
	 */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	/* Compat mode: emit the xstats copy saved by gnet_stats_copy_app(). */
	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);