cgroup: Add cgroup_subsys->css_rstat_flush()
author     Tejun Heo <tj@kernel.org>
           Thu, 26 Apr 2018 21:29:05 +0000 (14:29 -0700)
committer  Tejun Heo <tj@kernel.org>
           Thu, 26 Apr 2018 21:29:05 +0000 (14:29 -0700)
This patch adds cgroup_subsys->css_rstat_flush().  If a subsystem has
this callback, its csses are linked on cgrp->rstat_css_list and rstat
will call the function whenever the associated cgroup is flushed.
Flush is also performed when such csses are released so that residual
counts aren't lost.
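
For illustration, a controller opting into this would keep its counters
per-cpu and point the new callback at a flush function.  Everything
below is a hypothetical sketch, not part of this patch; my_cpu_stat,
my_css, my_css_rstat_flush and my_cgrp_subsys are made-up names.

  /* per-cpu counters, bumped locklessly in the hot path */
  struct my_cpu_stat {
          u64 nr_events;
          u64 last_nr_events;     /* portion already folded into the css */
  };

  struct my_css {
          struct cgroup_subsys_state css;
          struct my_cpu_stat __percpu *cpu_stat;
          u64 nr_events;          /* css-wide total, updated only by flush */
  };

  /*
   * Invoked by rstat for each updated cpu while the associated cgroup
   * is being flushed; runs under a raw spinlock, so it must not sleep.
   */
  static void my_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
  {
          struct my_css *mcss = container_of(css, struct my_css, css);
          struct my_cpu_stat *cstat = per_cpu_ptr(mcss->cpu_stat, cpu);
          u64 delta = cstat->nr_events - cstat->last_nr_events;

          cstat->last_nr_events = cstat->nr_events;
          mcss->nr_events += delta;
  }

  struct cgroup_subsys my_cgrp_subsys = {
          /* the usual .css_alloc/.css_free etc. go here as well */
          .css_rstat_flush = my_css_rstat_flush,
  };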

Combined with the rstat API that previous patches factored out, this allows
controllers to plug into rstat to manage their statistics in a
scalable way.
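
A controller would then drive this from two places: the hot path only
bumps a per-cpu counter and calls cgroup_rstat_updated() to mark the
cgroup dirty, and the read path calls cgroup_rstat_flush() before
reporting.  A sketch, continuing the hypothetical names from above
(my_charge_event and my_events_show are likewise made up):

  /* hot path: per-cpu bump plus a cheap "this cgroup is dirty" hint */
  static void my_charge_event(struct my_css *mcss)
  {
          int cpu = get_cpu();

          this_cpu_inc(mcss->cpu_stat->nr_events);
          cgroup_rstat_updated(mcss->css.cgroup, cpu);
          put_cpu();
  }

  /* read path: flush the subtree, then report the aggregated value */
  static int my_events_show(struct seq_file *seq, void *v)
  {
          struct my_css *mcss = container_of(seq_css(seq), struct my_css, css);

          cgroup_rstat_flush(mcss->css.cgroup);
          seq_printf(seq, "%llu\n", (unsigned long long)mcss->nr_events);
          return 0;
  }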

Signed-off-by: Tejun Heo <tj@kernel.org>
include/linux/cgroup-defs.h
kernel/cgroup/cgroup.c
kernel/cgroup/rstat.c

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 60d62fe97dc3cd4fea1ce132d701302060a44262..c0e68f903011cb294fe34386dd42896766ab462b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -130,6 +130,9 @@ struct cgroup_subsys_state {
        struct list_head sibling;
        struct list_head children;
 
+       /* flush target list anchored at cgrp->rstat_css_list */
+       struct list_head rstat_css_node;
+
        /*
         * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
         * matching css can be looked up using css_from_id().
@@ -412,6 +415,7 @@ struct cgroup {
 
        /* per-cpu recursive resource statistics */
        struct cgroup_rstat_cpu __percpu *rstat_cpu;
+       struct list_head rstat_css_list;
 
        /* cgroup basic resource statistics */
        struct cgroup_base_stat pending_bstat;  /* pending from children */
@@ -577,6 +581,7 @@ struct cgroup_subsys {
        void (*css_released)(struct cgroup_subsys_state *css);
        void (*css_free)(struct cgroup_subsys_state *css);
        void (*css_reset)(struct cgroup_subsys_state *css);
+       void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
        int (*css_extra_stat_show)(struct seq_file *seq,
                                   struct cgroup_subsys_state *css);
 
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 31af98996692718ef9a9272b4fdbf6e17377ba42..04b7e7fad31a932c1c69636b45289735b6f634ba 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1860,6 +1860,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
        cgrp->dom_cgrp = cgrp;
        cgrp->max_descendants = INT_MAX;
        cgrp->max_depth = INT_MAX;
+       INIT_LIST_HEAD(&cgrp->rstat_css_list);
        prev_cputime_init(&cgrp->prev_cputime);
 
        for_each_subsys(ss, ssid)
@@ -4621,6 +4622,11 @@ static void css_release_work_fn(struct work_struct *work)
 
        if (ss) {
                /* css release path */
+               if (!list_empty(&css->rstat_css_node)) {
+                       cgroup_rstat_flush(cgrp);
+                       list_del_rcu(&css->rstat_css_node);
+               }
+
                cgroup_idr_replace(&ss->css_idr, NULL, css->id);
                if (ss->css_released)
                        ss->css_released(css);
@@ -4682,6 +4688,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        css->id = -1;
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
+       INIT_LIST_HEAD(&css->rstat_css_node);
        css->serial_nr = css_serial_nr_next++;
        atomic_set(&css->online_cnt, 0);
 
@@ -4690,6 +4697,9 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
                css_get(css->parent);
        }
 
+       if (cgroup_on_dfl(cgrp) && ss->css_rstat_flush)
+               list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list);
+
        BUG_ON(cgroup_css(cgrp, ss));
 }
 
@@ -4791,6 +4801,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 err_list_del:
        list_del_rcu(&css->sibling);
 err_free_css:
+       list_del_rcu(&css->rstat_css_node);
        INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
        queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
        return ERR_PTR(err);
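
Since css_release_work_fn() above performs the final cgroup_rstat_flush()
and unlinks rstat_css_node before the css is freed, a controller's
css_free can simply drop its per-cpu counters without losing residual
counts.  A sketch, again using the hypothetical names from the commit
message above:

  static void my_css_free(struct cgroup_subsys_state *css)
  {
          struct my_css *mcss = container_of(css, struct my_css, css);

          /* residual per-cpu counts were already folded in on release */
          free_percpu(mcss->cpu_stat);
          kfree(mcss);
  }
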
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 3386fb251a9eeba54d50ddaaa4882123df977372..339366e257d4d13db013e8d949117ac024ba0737 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -145,8 +145,17 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
                struct cgroup *pos = NULL;
 
                raw_spin_lock(cpu_lock);
-               while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
+               while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
+                       struct cgroup_subsys_state *css;
+
                        cgroup_base_stat_flush(pos, cpu);
+
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(css, &pos->rstat_css_list,
+                                               rstat_css_node)
+                               css->ss->css_rstat_flush(css, cpu);
+                       rcu_read_unlock();
+               }
                raw_spin_unlock(cpu_lock);
 
                /* if @may_sleep, play nice and yield if necessary */