rbtree, perf: Use new rbtree helpers
author Peter Zijlstra <peterz@infradead.org>
Wed, 29 Apr 2020 15:05:15 +0000 (17:05 +0200)
committer Ingo Molnar <mingo@kernel.org>
Wed, 17 Feb 2021 13:07:48 +0000 (14:07 +0100)
Reduce rbtree boilerplate by using the new helpers.

One noteworthy change is unification of the various (partial) compare
functions. We construct a subtree match by forcing the sub-order to
always match, see __group_cmp().

Because the new compare functions take const pointers, we had to touch cgroup_id().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
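
The "new helpers" here are rb_add(), rb_find_first() and rb_next_match() from
<linux/rbtree.h>: they take a caller-supplied comparator and hide the
open-coded descent plus rb_link_node()/rb_insert_color() that each rbtree user
previously duplicated. Below is a minimal sketch of that pattern, for
orientation only; it is not part of the patch, and "struct item", its fields
and the item_*() functions are hypothetical, only the rb_*() helpers are real.

#include <linux/rbtree.h>
#include <linux/types.h>

/*
 * Illustrative only, not part of the patch: "struct item" keys on
 * (group, seq), analogous to perf's (cpu, cgroup, group_index) key.
 */
struct item {
	int		group;	/* primary key			*/
	u64		seq;	/* sub-order within a group	*/
	struct rb_node	node;
};

#define __node_2_item(n) rb_entry((n), struct item, node)

/* Full order (group, seq): used for insertion via rb_add(). */
static inline bool item_less(struct rb_node *a, const struct rb_node *b)
{
	const struct item *ia = __node_2_item(a);
	const struct item *ib = __node_2_item(b);

	if (ia->group != ib->group)
		return ia->group < ib->group;

	return ia->seq < ib->seq;
}

/* Partial order: match @group only; @seq is deliberately not compared. */
static inline int item_group_cmp(const void *key, const struct rb_node *node)
{
	const int *group = key;
	const struct item *i = __node_2_item(node);

	if (*group < i->group)
		return -1;
	if (*group > i->group)
		return 1;

	return 0;
}

static inline void item_insert(struct rb_root *tree, struct item *i)
{
	/* replaces the hand-rolled rb_link_node()/rb_insert_color() walk */
	rb_add(&i->node, tree, item_less);
}

/* Leftmost item of @group, or NULL if the group is empty. */
static inline struct item *item_first_in_group(struct rb_root *tree, int group)
{
	struct rb_node *node = rb_find_first(&group, tree, item_group_cmp);

	return node ? __node_2_item(node) : NULL;
}

/* In-order successor of @i, provided it is still in the same group. */
static inline struct item *item_next_in_group(struct item *i)
{
	struct rb_node *node = rb_next_match(&i->group, &i->node,
					     item_group_cmp);

	return node ? __node_2_item(node) : NULL;
}

The sketch gets its partial match by leaving @seq out of item_group_cmp().
The patch instead keeps a single comparator, perf_event_groups_cmp(), and has
__group_cmp() feed the node's own group_index back in as the left-hand value,
so that field always compares equal; this is the "forcing the sub-order to
always match" mentioned above.
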
include/linux/cgroup.h
kernel/events/core.c

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 451c2d26a5db8425394fce3ab27736ce61eb58c7..4f2f79de083e213b53967d2808af15636f9eb222 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -307,7 +307,7 @@ void css_task_iter_end(struct css_task_iter *it);
  * Inline functions.
  */
 
-static inline u64 cgroup_id(struct cgroup *cgrp)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
 {
        return cgrp->kn->id;
 }
@@ -701,7 +701,7 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
 struct cgroup_subsys_state;
 struct cgroup;
 
-static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
 static inline void css_get(struct cgroup_subsys_state *css) {}
 static inline void css_put(struct cgroup_subsys_state *css) {}
 static inline int cgroup_attach_task_all(struct task_struct *from,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 55d18791a72de38b77ae29440cf5b0a57b8db37d..3d890961f6e5451caf7fc99c16b818fa75f8c147 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1595,50 +1595,91 @@ static void perf_event_groups_init(struct perf_event_groups *groups)
        groups->index = 0;
 }
 
+static inline struct cgroup *event_cgroup(const struct perf_event *event)
+{
+       struct cgroup *cgroup = NULL;
+
+#ifdef CONFIG_CGROUP_PERF
+       if (event->cgrp)
+               cgroup = event->cgrp->css.cgroup;
+#endif
+
+       return cgroup;
+}
+
 /*
  * Compare function for event groups;
  *
  * Implements complex key that first sorts by CPU and then by virtual index
  * which provides ordering when rotating groups for the same CPU.
  */
-static bool
-perf_event_groups_less(struct perf_event *left, struct perf_event *right)
+static __always_inline int
+perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
+                     const u64 left_group_index, const struct perf_event *right)
 {
-       if (left->cpu < right->cpu)
-               return true;
-       if (left->cpu > right->cpu)
-               return false;
+       if (left_cpu < right->cpu)
+               return -1;
+       if (left_cpu > right->cpu)
+               return 1;
 
 #ifdef CONFIG_CGROUP_PERF
-       if (left->cgrp != right->cgrp) {
-               if (!left->cgrp || !left->cgrp->css.cgroup) {
-                       /*
-                        * Left has no cgroup but right does, no cgroups come
-                        * first.
-                        */
-                       return true;
-               }
-               if (!right->cgrp || !right->cgrp->css.cgroup) {
-                       /*
-                        * Right has no cgroup but left does, no cgroups come
-                        * first.
-                        */
-                       return false;
-               }
-               /* Two dissimilar cgroups, order by id. */
-               if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
-                       return true;
+       {
+               const struct cgroup *right_cgroup = event_cgroup(right);
 
-               return false;
+               if (left_cgroup != right_cgroup) {
+                       if (!left_cgroup) {
+                               /*
+                                * Left has no cgroup but right does, no
+                                * cgroups come first.
+                                */
+                               return -1;
+                       }
+                       if (!right_cgroup) {
+                               /*
+                                * Right has no cgroup but left does, no
+                                * cgroups come first.
+                                */
+                               return 1;
+                       }
+                       /* Two dissimilar cgroups, order by id. */
+                       if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
+                               return -1;
+
+                       return 1;
+               }
        }
 #endif
 
-       if (left->group_index < right->group_index)
-               return true;
-       if (left->group_index > right->group_index)
-               return false;
+       if (left_group_index < right->group_index)
+               return -1;
+       if (left_group_index > right->group_index)
+               return 1;
 
-       return false;
+       return 0;
+}
+
+#define __node_2_pe(node) \
+       rb_entry((node), struct perf_event, group_node)
+
+static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
+{
+       struct perf_event *e = __node_2_pe(a);
+       return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
+                                    __node_2_pe(b)) < 0;
+}
+
+struct __group_key {
+       int cpu;
+       struct cgroup *cgroup;
+};
+
+static inline int __group_cmp(const void *key, const struct rb_node *node)
+{
+       const struct __group_key *a = key;
+       const struct perf_event *b = __node_2_pe(node);
+
+       /* partial/subtree match: @cpu, @cgroup; ignore: @group_index */
+       return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
 }
 
 /*
@@ -1650,27 +1691,9 @@ static void
 perf_event_groups_insert(struct perf_event_groups *groups,
                         struct perf_event *event)
 {
-       struct perf_event *node_event;
-       struct rb_node *parent;
-       struct rb_node **node;
-
        event->group_index = ++groups->index;
 
-       node = &groups->tree.rb_node;
-       parent = *node;
-
-       while (*node) {
-               parent = *node;
-               node_event = container_of(*node, struct perf_event, group_node);
-
-               if (perf_event_groups_less(event, node_event))
-                       node = &parent->rb_left;
-               else
-                       node = &parent->rb_right;
-       }
-
-       rb_link_node(&event->group_node, parent, node);
-       rb_insert_color(&event->group_node, &groups->tree);
+       rb_add(&event->group_node, &groups->tree, __group_less);
 }
 
 /*
@@ -1718,45 +1741,17 @@ static struct perf_event *
 perf_event_groups_first(struct perf_event_groups *groups, int cpu,
                        struct cgroup *cgrp)
 {
-       struct perf_event *node_event = NULL, *match = NULL;
-       struct rb_node *node = groups->tree.rb_node;
-#ifdef CONFIG_CGROUP_PERF
-       u64 node_cgrp_id, cgrp_id = 0;
-
-       if (cgrp)
-               cgrp_id = cgrp->kn->id;
-#endif
-
-       while (node) {
-               node_event = container_of(node, struct perf_event, group_node);
-
-               if (cpu < node_event->cpu) {
-                       node = node->rb_left;
-                       continue;
-               }
-               if (cpu > node_event->cpu) {
-                       node = node->rb_right;
-                       continue;
-               }
-#ifdef CONFIG_CGROUP_PERF
-               node_cgrp_id = 0;
-               if (node_event->cgrp && node_event->cgrp->css.cgroup)
-                       node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
+       struct __group_key key = {
+               .cpu = cpu,
+               .cgroup = cgrp,
+       };
+       struct rb_node *node;
 
-               if (cgrp_id < node_cgrp_id) {
-                       node = node->rb_left;
-                       continue;
-               }
-               if (cgrp_id > node_cgrp_id) {
-                       node = node->rb_right;
-                       continue;
-               }
-#endif
-               match = node_event;
-               node = node->rb_left;
-       }
+       node = rb_find_first(&key, &groups->tree, __group_cmp);
+       if (node)
+               return __node_2_pe(node);
 
-       return match;
+       return NULL;
 }
 
 /*
@@ -1765,27 +1760,17 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
 static struct perf_event *
 perf_event_groups_next(struct perf_event *event)
 {
-       struct perf_event *next;
-#ifdef CONFIG_CGROUP_PERF
-       u64 curr_cgrp_id = 0;
-       u64 next_cgrp_id = 0;
-#endif
-
-       next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
-       if (next == NULL || next->cpu != event->cpu)
-               return NULL;
-
-#ifdef CONFIG_CGROUP_PERF
-       if (event->cgrp && event->cgrp->css.cgroup)
-               curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
+       struct __group_key key = {
+               .cpu = event->cpu,
+               .cgroup = event_cgroup(event),
+       };
+       struct rb_node *next;
 
-       if (next->cgrp && next->cgrp->css.cgroup)
-               next_cgrp_id = next->cgrp->css.cgroup->kn->id;
+       next = rb_next_match(&key, &event->group_node, __group_cmp);
+       if (next)
+               return __node_2_pe(next);
 
-       if (curr_cgrp_id != next_cgrp_id)
-               return NULL;
-#endif
-       return next;
+       return NULL;
 }
 
 /*