rcu_assign_pointer(blkg->q, q);
blkg->blkcg = blkcg;
blkg->plid = pol->plid;
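+ /*
+ * Take the initial reference that will be released on destroy.  This
+ * can be thought of as a joint reference by the cgroup and the request
+ * queue, which will be dropped by whichever exits first.
+ */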
+ blkg->refcnt = 1;
cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
/* alloc per-policy data */
}
}
+static void blkg_rcu_free(struct rcu_head *rcu_head)
+{
+ blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
+}
+
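+/**
+ * __blkg_release - release a blkg whose last reference has been put
+ * @blkg: blkg to release
+ *
+ * Called from blkg_put() once the reference count reaches zero.  Frees
+ * the blkg after an RCU grace period.
+ */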
+void __blkg_release(struct blkio_group *blkg)
+{
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+
+ /*
+ * A group is freed in an RCU manner, but holding an RCU read lock does
+ * not mean that one can access all the fields of blkg and assume they
+ * are valid.  For example, don't try to follow throtl_data and request
+ * queue links.
+ *
+ * Having a reference to a blkg under RCU allows access only to values
+ * local to the group, like group stats and group rate limits.
+ */
+ call_rcu(&blkg->rcu_head, blkg_rcu_free);
+}
+EXPORT_SYMBOL_GPL(__blkg_release);
+
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
struct blkio_group_stats_cpu *stats_cpu;
char path[128];
/* policy which owns this blk group */
enum blkio_policy_id plid;
+ /* reference count, protected by queue_lock */
+ int refcnt;
/* Configuration */
struct blkio_group_conf conf;
struct blkio_group_stats_cpu __percpu *stats_cpu;
struct blkg_policy_data *pd;
+
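+ /* freed via call_rcu(), see __blkg_release() */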
+ struct rcu_head rcu_head;
};
typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
return blkg->path;
}
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkio_group *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(!blkg->refcnt);
+ blkg->refcnt++;
+}
+
+void __blkg_release(struct blkio_group *blkg);
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkio_group *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(blkg->refcnt <= 0);
+ if (!--blkg->refcnt)
+ __blkg_release(blkg);
+}
+
#else
struct blkio_group {
static inline struct blkio_group *pdata_to_blkg(void *pdata,
struct blkio_policy_type *pol) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+static inline void blkg_get(struct blkio_group *blkg) { }
+static inline void blkg_put(struct blkio_group *blkg) { }
#endif
*/
unsigned long disptime;
- atomic_t ref;
unsigned int flags;
/* Two lists for READ and WRITE */
/* Some throttle limits got updated for the group */
int limits_changed;
-
- struct rcu_head rcu_head;
};
struct throtl_data
return td->nr_queued[0] + td->nr_queued[1];
}
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
-{
- atomic_inc(&tg->ref);
- return tg;
-}
-
-static void throtl_free_tg(struct rcu_head *head)
-{
- struct throtl_grp *tg = container_of(head, struct throtl_grp, rcu_head);
- struct blkio_group *blkg = tg_to_blkg(tg);
-
- free_percpu(blkg->stats_cpu);
- kfree(blkg->pd);
- kfree(blkg);
-}
-
-static void throtl_put_tg(struct throtl_grp *tg)
-{
- struct blkio_group *blkg = tg_to_blkg(tg);
-
- BUG_ON(atomic_read(&tg->ref) <= 0);
- if (!atomic_dec_and_test(&tg->ref))
- return;
-
- /* release the extra blkcg reference this blkg has been holding */
- css_put(&blkg->blkcg->css);
-
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows acess to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&tg->rcu_head, throtl_free_tg);
-}
-
static void throtl_init_blkio_group(struct blkio_group *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
tg->bps[WRITE] = -1;
tg->iops[READ] = -1;
tg->iops[WRITE] = -1;
-
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * request queue which will be dropped by either request queue
- * exit or cgroup deletion path depending on who is exiting first.
- */
- atomic_set(&tg->ref, 1);
}
static void throtl_link_blkio_group(struct request_queue *q,
bio_list_add(&tg->bio_lists[rw], bio);
- /* Take a bio reference on tg */
- throtl_ref_get_tg(tg);
+ /* Take a bio reference on blkg */
+ blkg_get(tg_to_blkg(tg));
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
throtl_enqueue_tg(td, tg);
bio = bio_list_pop(&tg->bio_lists[rw]);
tg->nr_queued[rw]--;
- /* Drop bio reference on tg */
- throtl_put_tg(tg);
+ /* Drop bio reference on blkg */
+ blkg_put(tg_to_blkg(tg));
BUG_ON(td->nr_queued[rw] <= 0);
td->nr_queued[rw]--;
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- throtl_put_tg(tg);
+ blkg_put(tg_to_blkg(tg));
td->nr_undestroyed_grps--;
}
enum wl_prio_t saved_serving_prio;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
struct hlist_node cfqd_node;
- int ref;
#endif
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
cfq_init_cfqg_base(cfqg);
cfqg->weight = blkg->blkcg->weight;
-
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * elevator which will be dropped by either elevator exit
- * or cgroup deletion path depending on who is exiting first.
- */
- cfqg->ref = 1;
}
/*
return cfqg;
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- cfqg->ref++;
- return cfqg;
-}
-
static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
- cfqq->cfqg->ref++;
-}
-
-static void cfq_put_cfqg(struct cfq_group *cfqg)
-{
- struct blkio_group *blkg = cfqg_to_blkg(cfqg);
- struct cfq_rb_root *st;
- int i, j;
-
- BUG_ON(cfqg->ref <= 0);
- cfqg->ref--;
- if (cfqg->ref)
- return;
-
- /* release the extra blkcg reference this blkg has been holding */
- css_put(&blkg->blkcg->css);
-
- for_each_cfqg_st(cfqg, i, j, st)
- BUG_ON(!RB_EMPTY_ROOT(&st->rb));
- free_percpu(blkg->stats_cpu);
- kfree(blkg->pd);
- kfree(blkg);
+ blkg_get(cfqg_to_blkg(cfqg));
}
static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- cfq_put_cfqg(cfqg);
+ blkg_put(cfqg_to_blkg(cfqg));
}
static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
return cfqd->root_group;
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- return cfqg;
-}
-
static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
}
static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
#endif /* GROUP_IOSCHED */
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
- cfq_put_cfqg(cfqg);
+ blkg_put(cfqg_to_blkg(cfqg));
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
- cfq_put_cfqg(RQ_CFQG(rq));
+ blkg_put(cfqg_to_blkg(RQ_CFQG(rq)));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
cfqq->allocated[rw]++;
cfqq->ref++;
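+ /* rq reference on cfqg's blkg, dropped when the request is put */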
+ blkg_get(cfqg_to_blkg(cfqq->cfqg));
rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);
return 0;
}
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
+
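+ /* cfq_link_cfqq_cfqg() and blkg_put() require queue_lock to be held */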
+ spin_lock_irq(q->queue_lock);
cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
- cfq_put_cfqg(cfqd->root_group);
+ blkg_put(cfqg_to_blkg(cfqd->root_group));
+ spin_unlock_irq(q->queue_lock);
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;