#define MAX_KEY_LEN 100
static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
-static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
+static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS];
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
+static bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkio_policy_type *pol)
+{
+ return pol && test_bit(pol->plid, q->blkcg_pols);
+}
+
+static size_t blkg_pd_size(const struct blkio_policy_type *pol)
+{
+ return sizeof(struct blkg_policy_data) + pol->pdata_size;
+}
+
/**
* blkg_free - free a blkg
* @blkg: blkg to free
if (!blkg)
return;
- for (i = 0; i < BLKIO_NR_POLICIES; i++) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkio_policy_type *pol = blkio_policy[i];
struct blkg_policy_data *pd = blkg->pd[i];
blkg->refcnt = 1;
cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
- for (i = 0; i < BLKIO_NR_POLICIES; i++) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkio_policy_type *pol = blkio_policy[i];
struct blkg_policy_data *pd;
- if (!pol)
+ if (!blkcg_policy_enabled(q, pol))
continue;
/* alloc per-policy data and attach it to blkg */
- pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
- q->node);
+ pd = kzalloc_node(blkg_pd_size(pol), GFP_ATOMIC, q->node);
if (!pd) {
blkg_free(blkg);
return NULL;
}
/* invoke per-policy init */
- for (i = 0; i < BLKIO_NR_POLICIES; i++) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkio_policy_type *pol = blkio_policy[i];
- if (pol)
+ if (blkcg_policy_enabled(blkg->q, pol))
pol->ops.blkio_init_group_fn(blkg);
}
return blkg;
}
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
- struct request_queue *q,
- bool for_root)
+static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
+{
+ struct blkio_group *blkg;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
+ if (blkg->q == q)
+ return blkg;
+ return NULL;
+}
+
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q);
+}
+EXPORT_SYMBOL_GPL(blkg_lookup);
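/*
 * Illustrative sketch, not part of the patch: how a policy might look up
 * its private data on the IO path using blkg_lookup().  "mypol" and
 * "struct my_pd" are hypothetical names used only for this example.
 */
static struct my_pd *my_pd_lookup(struct request_queue *q, struct bio *bio)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* returns NULL if @q is bypassing, see above */
	blkg = blkg_lookup(bio_blkio_cgroup(bio), q);
	if (!blkg || !blkg->pd[mypol.plid])
		return NULL;
	return (struct my_pd *)blkg->pd[mypol.plid]->pdata;
}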
+
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct blkio_group *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- * The following can be removed if blkg lookup is guaranteed to
- * fail on a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)) && !for_root)
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
- blkg = blkg_lookup(blkcg, q);
+ blkg = __blkg_lookup(blkcg, q);
if (blkg)
return blkg;
out:
return blkg;
}
-EXPORT_SYMBOL_GPL(blkg_lookup_create);
-/* called under rcu_read_lock(). */
-struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
- struct request_queue *q)
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+ struct request_queue *q)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
-
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
- if (blkg->q == q)
- return blkg;
- return NULL;
+ /*
+ * This could be the first entry point of blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
+ */
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return __blkg_lookup_create(blkcg, q);
}
-EXPORT_SYMBOL_GPL(blkg_lookup);
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
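/*
 * Illustrative sketch, not part of the patch: looking up (and if needed
 * creating) a blkg on an allocation path.  A real caller would keep using
 * the result under the queue lock or take a reference on it first.
 */
static struct blkio_group *my_get_blkg(struct request_queue *q,
				       struct blkio_cgroup *blkcg)
{
	struct blkio_group *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))
		blkg = q->root_blkg;	/* queue is bypassing or dead */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	return blkg;
}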
static void blkg_destroy(struct blkio_group *blkg)
{
blkg_put(blkg);
}
-/*
- * XXX: This updates blkg policy data in-place for root blkg, which is
- * necessary across elevator switch and policy registration as root blkgs
- * aren't shot down. This broken and racy implementation is temporary.
- * Eventually, blkg shoot down will be replaced by proper in-place update.
- */
-void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
-{
- struct blkio_policy_type *pol = blkio_policy[plid];
- struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
- struct blkg_policy_data *pd;
-
- if (!blkg)
- return;
-
- kfree(blkg->pd[plid]);
- blkg->pd[plid] = NULL;
-
- if (!pol)
- return;
-
- pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
- WARN_ON_ONCE(!pd);
-
- blkg->pd[plid] = pd;
- pd->blkg = blkg;
- pol->ops.blkio_init_group_fn(blkg);
-}
-EXPORT_SYMBOL_GPL(update_root_blkg_pd);
-
/**
* blkg_destroy_all - destroy all blkgs associated with a request_queue
* @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
*
- * Destroy blkgs associated with @q. If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
*/
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
{
struct blkio_group *blkg, *n;
- spin_lock_irq(q->queue_lock);
+ lockdep_assert_held(q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkio_cgroup *blkcg = blkg->blkcg;
- /* skip root? */
- if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
- continue;
-
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
-
- spin_unlock_irq(q->queue_lock);
}
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- for (i = 0; i < BLKIO_NR_POLICIES; i++) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkio_policy_type *pol = blkio_policy[i];
- if (pol && pol->ops.blkio_reset_group_stats_fn)
+ if (blkcg_policy_enabled(blkg->q, pol) &&
+ pol->ops.blkio_reset_group_stats_fn)
pol->ops.blkio_reset_group_stats_fn(blkg);
}
}
*/
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
u64 (*prfill)(struct seq_file *, void *, int),
- int pol, int data, bool show_total)
+ const struct blkio_policy_type *pol, int data,
+ bool show_total)
{
struct blkio_group *blkg;
struct hlist_node *n;
spin_lock_irq(&blkcg->lock);
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
- if (blkg->pd[pol])
- total += prfill(sf, blkg->pd[pol]->pdata, data);
+ if (blkcg_policy_enabled(blkg->q, pol))
+ total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
spin_unlock_irq(&blkcg->lock);
if (show_total)
/**
* blkg_conf_prep - parse and prepare for per-blkg config update
* @blkcg: target block cgroup
+ * @pol: target policy
* @input: input string
* @ctx: blkg_conf_ctx to be filled
*
* Parse per-blkg config update from @input and initialize @ctx with the
* result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
- * value. This function returns with RCU read locked and must be paired
- * with blkg_conf_finish().
+ * value. This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
*/
-int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
+int blkg_conf_prep(struct blkio_cgroup *blkcg,
+ const struct blkio_policy_type *pol, const char *input,
struct blkg_conf_ctx *ctx)
- __acquires(rcu)
+ __acquires(rcu) __acquires(disk->queue->queue_lock)
{
struct gendisk *disk;
struct blkio_group *blkg;
return -EINVAL;
rcu_read_lock();
-
spin_lock_irq(disk->queue->queue_lock);
- blkg = blkg_lookup_create(blkcg, disk->queue, false);
- spin_unlock_irq(disk->queue->queue_lock);
+
+ if (blkcg_policy_enabled(disk->queue, pol))
+ blkg = blkg_lookup_create(blkcg, disk->queue);
+ else
+ blkg = ERR_PTR(-EINVAL);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
rcu_read_unlock();
+ spin_unlock_irq(disk->queue->queue_lock);
put_disk(disk);
/*
* If queue was bypassing, we should retry. Do so after a
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
- __releases(rcu)
+ __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
+ spin_unlock_irq(ctx->disk->queue->queue_lock);
rcu_read_unlock();
put_disk(ctx->disk);
}
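/*
 * Illustrative sketch, not part of the patch: a cftype write handler built
 * on the blkg_conf_prep()/blkg_conf_finish() pair.  "mypol", "struct my_pd"
 * and its weight field are hypothetical.
 */
static int my_weight_write(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_conf_ctx ctx;
	struct my_pd *pd;
	int ret;

	ret = blkg_conf_prep(blkcg, &mypol, buf, &ctx);
	if (ret)
		return ret;

	/* both the RCU read lock and the queue lock are held here */
	pd = (struct my_pd *)ctx.blkg->pd[mypol.plid]->pdata;
	pd->weight = ctx.v;

	blkg_conf_finish(&ctx);
	return 0;
}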
*/
int blkcg_init_queue(struct request_queue *q)
{
- int ret;
-
might_sleep();
- ret = blk_throtl_init(q);
- if (ret)
- return ret;
-
- mutex_lock(&all_q_mutex);
- INIT_LIST_HEAD(&q->all_q_node);
- list_add_tail(&q->all_q_node, &all_q_list);
- mutex_unlock(&all_q_mutex);
-
- return 0;
+ return blk_throtl_init(q);
}
/**
*/
void blkcg_exit_queue(struct request_queue *q)
{
- mutex_lock(&all_q_mutex);
- list_del_init(&q->all_q_node);
- mutex_unlock(&all_q_mutex);
-
- blkg_destroy_all(q, true);
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
blk_throtl_exit(q);
}
return ret;
}
-static void blkcg_bypass_start(void)
- __acquires(&all_q_mutex)
-{
- struct request_queue *q;
-
- mutex_lock(&all_q_mutex);
-
- list_for_each_entry(q, &all_q_list, all_q_node) {
- blk_queue_bypass_start(q);
- blkg_destroy_all(q, false);
- }
-}
-
-static void blkcg_bypass_end(void)
- __releases(&all_q_mutex)
-{
- struct request_queue *q;
-
- list_for_each_entry(q, &all_q_list, all_q_node)
- blk_queue_bypass_end(q);
-
- mutex_unlock(&all_q_mutex);
-}
-
struct cgroup_subsys blkio_subsys = {
.name = "blkio",
.create = blkiocg_create,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from the IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations. Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkio_policy_type *pol)
{
- struct request_queue *q;
+ LIST_HEAD(pds);
+ struct blkio_group *blkg;
+ struct blkg_policy_data *pd, *n;
+ int cnt = 0, ret;
- mutex_lock(&blkcg_pol_mutex);
+ if (blkcg_policy_enabled(q, pol))
+ return 0;
+
+ blk_queue_bypass_start(q);
+
+ /* make sure the root blkg exists and count the existing blkgs */
+ spin_lock_irq(q->queue_lock);
+
+ rcu_read_lock();
+ blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
+ rcu_read_unlock();
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ goto out_unlock;
+ }
+ q->root_blkg = blkg;
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node)
+ cnt++;
+
+ spin_unlock_irq(q->queue_lock);
+
+ /* allocate policy_data for all existing blkgs */
+ while (cnt--) {
+ pd = kzalloc_node(blkg_pd_size(pol), GFP_KERNEL, q->node);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ list_add_tail(&pd->alloc_node, &pds);
+ }
+
+ /*
+ * Install the allocated pds. With @q bypassing, no new blkg
+ * should have been created while the queue lock was dropped.
+ */
+ spin_lock_irq(q->queue_lock);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (WARN_ON(list_empty(&pds))) {
+ /* umm... this shouldn't happen, just abort */
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+ list_del_init(&pd->alloc_node);
+
+ /* grab blkcg lock too while installing @pd on @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pol->ops.blkio_init_group_fn(blkg);
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
- blkcg_bypass_start();
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out_free:
+ blk_queue_bypass_end(q);
+ list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ kfree(pd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
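/*
 * Illustrative sketch, not part of the patch: a policy (or elevator)
 * enabling itself on a queue.  "mypol" is a hypothetical, already
 * registered blkio_policy_type.
 */
static int mypol_attach_queue(struct request_queue *q)
{
	int ret;

	ret = blkcg_activate_policy(q, &mypol);
	if (ret)
		return ret;

	/* every blkg on @q now has ->pd[mypol.plid] initialized */
	return 0;
}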
+
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkio_policy_type *pol)
+{
+ struct blkio_group *blkg;
+
+ if (!blkcg_policy_enabled(q, pol))
+ return;
+
+ blk_queue_bypass_start(q);
+ spin_lock_irq(q->queue_lock);
+
+ __clear_bit(pol->plid, q->blkcg_pols);
+
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* grab blkcg lock too while removing @pd from @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ if (pol->ops.blkio_exit_group_fn)
+ pol->ops.blkio_exit_group_fn(blkg);
+
+ kfree(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ blk_queue_bypass_end(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
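/*
 * Illustrative sketch, not part of the patch: teardown counterpart of the
 * activation sketch above.
 */
static void mypol_detach_queue(struct request_queue *q)
{
	/* frees ->pd[mypol.plid] on every blkg; the blkgs themselves are
	 * shot down only if this was the last enabled policy on @q */
	blkcg_deactivate_policy(q, &mypol);
}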
+
+/**
+ * blkio_policy_register - register a blkcg policy
+ * @blkiop: blkcg policy to register
+ *
+ * Register @blkiop with blkcg core. Might sleep and @blkiop may be
+ * modified on successful registration. Returns 0 on success and -errno on
+ * failure.
+ */
+int blkio_policy_register(struct blkio_policy_type *blkiop)
+{
+ int i, ret;
+
+ mutex_lock(&blkcg_pol_mutex);
- BUG_ON(blkio_policy[blkiop->plid]);
- blkio_policy[blkiop->plid] = blkiop;
- list_for_each_entry(q, &all_q_list, all_q_node)
- update_root_blkg_pd(q, blkiop->plid);
+ /* find an empty slot */
+ ret = -ENOSPC;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (!blkio_policy[i])
+ break;
+ if (i >= BLKCG_MAX_POLS)
+ goto out_unlock;
- blkcg_bypass_end();
+ /* register and update blkgs */
+ blkiop->plid = i;
+ blkio_policy[i] = blkiop;
+ /* everything is in place, add intf files for the new policy */
if (blkiop->cftypes)
WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
-
+ ret = 0;
+out_unlock:
mutex_unlock(&blkcg_pol_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
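/*
 * Illustrative sketch, not part of the patch: registering a policy with the
 * new API.  ->plid is now assigned by the core rather than hard-coded, and
 * registration can fail with -ENOSPC.  All "mypol_*" and "my_pd" names are
 * hypothetical.
 */
static struct blkio_policy_type mypol = {
	.ops = {
		.blkio_init_group_fn	= mypol_init_group,
		.blkio_exit_group_fn	= mypol_exit_group,
	},
	.pdata_size	= sizeof(struct my_pd),
	.cftypes	= mypol_files,
};

static int __init mypol_module_init(void)
{
	return blkio_policy_register(&mypol);
}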
+/**
+ * blkio_policy_unregister - unregister a blkcg policy
+ * @blkiop: blkcg policy to unregister
+ *
+ * Undo blkio_policy_register(@blkiop). Might sleep.
+ */
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
- struct request_queue *q;
-
mutex_lock(&blkcg_pol_mutex);
+ if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
+ goto out_unlock;
+
+ /* kill the intf files first */
if (blkiop->cftypes)
cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);
- blkcg_bypass_start();
-
- BUG_ON(blkio_policy[blkiop->plid] != blkiop);
+ /* unregister and update blkgs */
blkio_policy[blkiop->plid] = NULL;
-
- list_for_each_entry(q, &all_q_list, all_q_node)
- update_root_blkg_pd(q, blkiop->plid);
- blkcg_bypass_end();
-
+out_unlock:
mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
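/*
 * Illustrative sketch, not part of the patch: exit counterpart of the
 * registration sketch above.
 */
static void __exit mypol_module_exit(void)
{
	blkio_policy_unregister(&mypol);
}
module_init(mypol_module_init);
module_exit(mypol_module_exit);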