Pull block IO core bits from Jens Axboe:
"Below are the core block IO bits for 3.9. It was delayed a few days
since my workstation kept crashing every 2-8h after pulling it into
current -git, but turns out it is a bug in the new pstate code (divide
by zero, will report separately). In any case, it contains:
- The big cfq/blkcg update from Tejun and Vivek.
- Additional block and writeback tracepoints from Tejun.
- Improvement of the "should sort" (based on queues) logic in the plug
flushing.
- _io() variants of the wait_for_completion() interface, using
io_schedule() instead of schedule() to contribute to io wait
properly.
- Various little fixes.
You'll get two trivial merge conflicts, which should be easy enough to
fix up"
Fix up the trivial conflicts due to hlist traversal cleanups (commit
b67bfe0d42ca: "hlist: drop the node parameter from iterators").
* 'for-3.9/core' of git://git.kernel.dk/linux-block: (39 commits)
block: remove redundant check to bd_openers()
block: use i_size_write() in bd_set_size()
cfq: fix lock imbalance with failed allocations
drivers/block/swim3.c: fix null pointer dereference
block: don't select PERCPU_RWSEM
block: account iowait time when waiting for completion of IO request
sched: add wait_for_completion_io[_timeout]
writeback: add more tracepoints
block: add block_{touch|dirty}_buffer tracepoint
buffer: make touch_buffer() an exported function
block: add @req to bio_{front|back}_merge tracepoints
block: add missing block_bio_complete() tracepoint
block: Remove should_sort judgement when flush blk_plug
block,elevator: use new hashtable implementation
cfq-iosched: add hierarchical cfq_group statistics
cfq-iosched: collect stats from dead cfqgs
cfq-iosched: separate out cfqg_stats_reset() from cfq_pd_reset_stats()
blkcg: make blkcg_print_blkgs() grab q locks instead of blkcg lock
block: RCU free request_queue
blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
...

diff --cc block/blk-cgroup.c
 		       bool show_total)
 {
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	u64 total = 0;

-	spin_lock_irq(&blkcg->lock);
-	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
+	rcu_read_lock();
-	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
++	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+		spin_lock_irq(blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-	spin_unlock_irq(&blkcg->lock);
+		spin_unlock_irq(blkg->q->queue_lock);
+	}
+	rcu_read_unlock();

 	if (show_total)
 		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
diff --cc block/cfq-iosched.c
 		return -EINVAL;

 	spin_lock_irq(&blkcg->lock);
-	blkcg->cfq_weight = (unsigned int)val;
+
+	if (!is_leaf_weight)
+		blkcg->cfq_weight = val;
+	else
+		blkcg->cfq_leaf_weight = val;

-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

-		if (cfqg && !cfqg->dev_weight)
-			cfqg->new_weight = blkcg->cfq_weight;
+		if (!cfqg)
+			continue;
+
+		if (!is_leaf_weight) {
+			if (!cfqg->dev_weight)
+				cfqg->new_weight = blkcg->cfq_weight;
+		} else {
+			if (!cfqg->dev_leaf_weight)
+				cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+		}
 	}

 	spin_unlock_irq(&blkcg->lock);
diff --cc block/elevator.c
 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
-	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
-	struct hlist_node *entry, *next;
+	struct hlist_node *next;
 	struct request *rq;

-	hlist_for_each_entry_safe(rq, next, hash_list, hash) {
-	hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
++	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
 		BUG_ON(!ELV_ON_HASH(rq));

 		if (unlikely(!rq_mergeable(rq))) {