/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
        .id      = GENL_ID_GENERATE,
        .name    = TASKSTATS_GENL_NAME,
        .version = TASKSTATS_GENL_VERSION,
        .maxattr = TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
        [TASKSTATS_CMD_ATTR_PID]                = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID]               = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]   = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
        [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};

struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
        REGISTER,
        DEREGISTER,
        CPU_DONT_CARE
};

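/*
 * Allocate a reply skb of @size bytes and write a generic netlink header
 * for @cmd into it. Replies to userspace requests reuse the sequence
 * number carried in @info; kernel-initiated messages (exit notifications,
 * @info == NULL) draw a fresh per-cpu sequence number instead.
 */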
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                         size_t size)
{
        struct sk_buff *skb;
        void *reply;

        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = genlmsg_new(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!info) {
                int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
        } else
                reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
        if (reply == NULL) {
                nlmsg_free(skb);
                return -EINVAL;
        }

        *skbp = skb;
        return 0;
}

/*
 * Send taskstats data in @skb as a reply to the request described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);

        genlmsg_end(skb, reply);

        return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to every listener on the per-cpu list
 * @listeners that registered for that cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
                               struct listener_list *listeners)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        genlmsg_end(skb, reply);

        rc = 0;
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                        if (!skb_next)
                                break;
                }
                rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
                        s->valid = 0;
                        delcount++;
                }
                skb_cur = skb_next;
        }
        up_read(&listeners->sem);

        if (skb_cur)
                nlmsg_free(skb_cur);

        if (!delcount)
                return;

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                if (!s->valid) {
                        list_del(&s->list);
                        kfree(s);
                }
        }
        up_write(&listeners->sem);
}

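/*
 * Fill @stats with the accounting data for a single task @tsk, reporting
 * ids relative to @user_ns and @pid_ns.
 */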
static void fill_stats(struct user_namespace *user_ns,
                       struct pid_namespace *pid_ns,
                       struct task_struct *tsk, struct taskstats *stats)
{
        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */

        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
        bacct_add_tsk(user_ns, pid_ns, stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
}

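/*
 * Fill @stats for the task whose pid in the caller's pid namespace is
 * @pid. Returns -ESRCH if no such task exists.
 */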
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
        struct task_struct *tsk;

        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return -ESRCH;
        fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
        put_task_struct(tsk);
        return 0;
}

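/*
 * Fill @stats with the aggregated data for the thread group led by @tgid:
 * start from the per-signal_struct totals accumulated from already-dead
 * threads and add the stats of each live thread.
 */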
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
        struct task_struct *tsk, *first;
        unsigned long flags;
        int rc = -ESRCH;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
        first = find_task_by_vpid(tgid);

        if (!first || !lock_task_sighand(first, &flags))
                goto out;

        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
        else
                memset(stats, 0, sizeof(*stats));

        tsk = first;
        do {
                if (tsk->exit_state)
                        continue;
                /*
                 * Accounting subsystem can call its functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

                stats->nvcsw += tsk->nvcsw;
                stats->nivcsw += tsk->nivcsw;
        } while_each_thread(first, tsk);

        unlock_task_sighand(first, &flags);
        rc = 0;
out:
        rcu_read_unlock();

        stats->version = TASKSTATS_VERSION;
        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
        return rc;
}

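/*
 * Fold the exiting task's delay-accounting stats into tsk->signal->stats,
 * if the per-thread-group structure has been allocated.
 */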
static void fill_tgid_exit(struct task_struct *tsk)
{
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        if (!tsk->signal->stats)
                goto ret;

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk, into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
        return;
}

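/*
 * Register (@isadd == REGISTER) or deregister the listener identified by
 * netlink port id @pid on the per-cpu listener list of every cpu in @mask.
 * Only tasks in the initial user and pid namespaces may do this, and the
 * mask must be a subset of cpu_possible_mask.
 */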
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
        struct listener_list *listeners;
        struct listener *s, *tmp, *s2;
        unsigned int cpu;
        int ret = 0;

        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;

        if (current_user_ns() != &init_user_ns)
                return -EINVAL;

        if (task_active_pid_ns(current) != &init_pid_ns)
                return -EINVAL;

        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener),
                                         GFP_KERNEL, cpu_to_node(cpu));
                        if (!s) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }
                        s->pid = pid;
                        s->valid = 1;

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_for_each_entry(s2, &listeners->list, list) {
                                if (s2->pid == pid && s2->valid)
                                        goto exists;
                        }
                        list_add(&s->list, &listeners->list);
                        s = NULL;
exists:
                        up_write(&listeners->sem);
                        kfree(s); /* nop if NULL */
                }
                return 0;
        }

        /* Deregister or cleanup */
cleanup:
        for_each_cpu(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                        if (s->pid == pid) {
                                list_del(&s->list);
                                kfree(s);
                                break;
                        }
                }
                up_write(&listeners->sem);
        }
        return ret;
}

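/*
 * Parse the cpulist string in netlink attribute @na (e.g. "0-3,8") into
 * @mask. Returns 1 if the attribute is absent, a negative errno on error
 * and 0 on success.
 */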
static int parse(struct nlattr *na, struct cpumask *mask)
{
        char *data;
        int len;
        int ret;

        if (na == NULL)
                return 1;
        len = nla_len(na);
        if (len > TASKSTATS_CPUMASK_MAXLEN)
                return -E2BIG;
        if (len < 1)
                return -EINVAL;
        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        nla_strlcpy(data, na, len);
        ret = cpulist_parse(data, mask);
        kfree(data);
        return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

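/*
 * Write the nested PID/TGID aggregate into @skb: the pid/tgid attribute
 * followed by space reserved for a TASKSTATS_TYPE_STATS attribute.
 * Returns a pointer to the reserved taskstats payload for the caller to
 * fill in, or NULL if the skb ran out of room.
 */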
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
        struct nlattr *na, *ret;
        int aggr;

        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        /*
         * The taskstats structure is internally aligned on 8 byte
         * boundaries but the layout of the aggregate reply, with
         * two NLA headers and the pid (each 4 bytes), actually
         * forces the entire structure to be unaligned. This causes
         * the kernel to issue unaligned access warnings on some
         * architectures like ia64. Unfortunately, some software out there
         * doesn't properly unroll the NLA packet and assumes that the start
         * of the taskstats structure will always be 20 bytes from the start
         * of the netlink payload. Aligning the start of the taskstats
         * structure breaks this software, which we don't want. So, for now
         * the alignment only happens on architectures that require it
         * and those users will have to update to fixed versions of those
         * packages. Space is reserved in the packet only when needed.
         * This ifdef should be removed in several years e.g. 2012 once
         * we can be confident that fixed versions are installed on most
         * systems. We add the padding before the aggregate since the
         * aggregate is already a defined type.
         */
#ifdef TASKSTATS_NEEDS_PADDING
        if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
                goto err;
#endif
        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;

        if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        if (!ret) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        nla_nest_end(skb, na);

        return nla_data(ret);
err:
        return NULL;
}

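/*
 * Handle CGROUPSTATS_CMD_GET: look up the cgroup directory behind the
 * file descriptor supplied in CGROUPSTATS_CMD_ATTR_FD, build a
 * cgroupstats reply for it and send it back to the requester.
 */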
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        int rc = 0;
        struct sk_buff *rep_skb;
        struct cgroupstats *stats;
        struct nlattr *na;
        size_t size;
        u32 fd;
        struct fd f;

        na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
        if (!na)
                return -EINVAL;

        fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
        f = fdget(fd);
        if (!f.file)
                return 0;

        size = nla_total_size(sizeof(struct cgroupstats));

        rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
                           size);
        if (rc < 0)
                goto err;

        na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
                         sizeof(struct cgroupstats));
        if (na == NULL) {
                nlmsg_free(rep_skb);
                rc = -EMSGSIZE;
                goto err;
        }

        stats = nla_data(na);
        memset(stats, 0, sizeof(*stats));

        rc = cgroupstats_build(stats, f.file->f_path.dentry);
        if (rc < 0) {
                nlmsg_free(rep_skb);
                goto err;
        }

        rc = send_reply(rep_skb, info);

err:
        fdput(f);
        return rc;
}

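/*
 * Register the sender of @info as an exit-data listener on the cpus named
 * in the TASKSTATS_CMD_ATTR_REGISTER_CPUMASK attribute.
 * cmd_attr_deregister_cpumask() below is the mirror operation.
 */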
static int cmd_attr_register_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}

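/*
 * Space needed for one aggregate reply: the nested attribute header, a
 * pid/tgid attribute, the taskstats attribute and, where required, the
 * alignment padding attribute.
 */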
static size_t taskstats_packet_size(void)
{
        size_t size;

        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
        size += nla_total_size(0); /* Padding for alignment */
#endif
        return size;
}

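/*
 * Handle a TASKSTATS_CMD_ATTR_PID request: build a per-task taskstats
 * reply for the requested pid and send it back to the requester.
 */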
static int cmd_attr_pid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 pid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
        if (!stats)
                goto err;

        rc = fill_stats_for_pid(pid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}

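/*
 * Handle a TASKSTATS_CMD_ATTR_TGID request: as above, but the reply
 * aggregates the stats of the whole thread group.
 */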
static int cmd_attr_tgid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 tgid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
        if (!stats)
                goto err;

        rc = fill_stats_for_tgid(tgid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}

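/*
 * Dispatch a TASKSTATS_CMD_GET request to the handler matching the
 * attribute that userspace supplied.
 */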
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
                return cmd_attr_register_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
                return cmd_attr_deregister_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
                return cmd_attr_pid(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
                return cmd_attr_tgid(info);
        else
                return -EINVAL;
}

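/*
 * Lazily allocate the per-thread-group taskstats structure the first time
 * a member of a multi-threaded group exits; a racing allocation is
 * resolved under siglock and the loser's copy is freed.
 */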
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct taskstats *stats;

        if (sig->stats || thread_group_empty(tsk))
                goto ret;

        /* No problem if kmem_cache_zalloc() fails */
        stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

        spin_lock_irq(&tsk->sighand->siglock);
        if (!sig->stats) {
                sig->stats = stats;
                stats = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (stats)
                kmem_cache_free(taskstats_cache, stats);
ret:
        return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
        int rc;
        struct listener_list *listeners;
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        int is_thread_group;

        if (!family_registered)
                return;

        /*
         * Size includes space for nested attributes
         */
        size = taskstats_packet_size();

        is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                size = 2 * size;
                /* fill the tsk->signal->stats structure */
                fill_tgid_exit(tsk);
        }

        listeners = raw_cpu_ptr(&listener_array);
        if (list_empty(&listeners->list))
                return;

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
                         task_pid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        if (!is_thread_group || !group_dead)
                goto send;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
                         task_tgid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
        send_cpu_listeners(rep_skb, listeners);
        return;
err:
        nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
        {
                .cmd    = TASKSTATS_CMD_GET,
                .doit   = taskstats_user_cmd,
                .policy = taskstats_cmd_get_policy,
                .flags  = GENL_ADMIN_PERM,
        },
        {
                .cmd    = CGROUPSTATS_CMD_GET,
                .doit   = cgroupstats_user_cmd,
                .policy = cgroupstats_cmd_get_policy,
        },
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
        unsigned int i;

        taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
        }
}

static int __init taskstats_init(void)
{
        int rc;

        rc = genl_register_family_with_ops(&family, taskstats_ops);
        if (rc)
                return rc;

        family_registered = 1;
        pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
        return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);