From: Linus Torvalds
Date: Sun, 15 Jan 2012 20:24:45 +0000 (-0800)
Subject: Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
X-Git-Tag: Ubuntu-5.2.0-15.16~18890
X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;h=b3c9dd182ed3bdcdaf0e42625a35924b0497afdc;p=mirror_ubuntu-eoan-kernel.git

Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block

* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:

 - block/blk-cgroup.c
	Switch from can_attach_task to can_attach

 - block/cfq-iosched.c
	conflict with now removed cic index changes (we now use q->id instead)
---

b3c9dd182ed3bdcdaf0e42625a35924b0497afdc
diff --cc block/blk-cgroup.c
index b8c143d68ee0,278869358049..fa8f26309444
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@@ -1648,18 -1641,15 +1648,19 @@@ static int blkiocg_can_attach(struct cg
  	return ret;
  }
  
 -static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 +static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 +			   struct cgroup_taskset *tset)
  {
 +	struct task_struct *task;
  	struct io_context *ioc;
  
 -	/* we don't lose anything even if ioc allocation fails */
 -	ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
 -	if (ioc) {
 -		ioc_cgroup_changed(ioc);
 -		put_io_context(ioc, NULL);
 +	cgroup_taskset_for_each(task, cgrp, tset) {
- 		task_lock(task);
- 		ioc = task->io_context;
- 		if (ioc)
- 			ioc->cgroup_changed = 1;
- 		task_unlock(task);
++		/* we don't lose anything even if ioc allocation fails */
++		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++		if (ioc) {
++			ioc_cgroup_changed(ioc);
++			put_io_context(ioc, NULL);
++		}
  	}
  }
  
diff --cc block/blk-core.c
index 15de223c7f93,8fbdac7010bb..e6c05a97ee2b
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@@ -366,19 -369,23 +369,30 @@@ void blk_drain_queue(struct request_que
  		if (drain_all)
  			blk_throtl_drain(q);
  
- 		__blk_run_queue(q);
+ 		/*
+ 		 * This function might be called on a queue which failed
+ 		 * driver init after queue creation. Some drivers
+ 		 * (e.g. fd) get unhappy in such cases. Kick queue iff
+ 		 * dispatch queue has something on it.
+ 		 */
+ 		if (!list_empty(&q->queue_head))
+ 			__blk_run_queue(q);
  
- 		if (drain_all)
- 			nr_rqs = q->rq.count[0] + q->rq.count[1];
- 		else
- 			nr_rqs = q->rq.elvpriv;
+ 		drain |= q->rq.elvpriv;
+ 
+ 		/*
+ 		 * Unfortunately, requests are queued at and tracked from
+ 		 * multiple places and there's no single counter which can
+ 		 * be drained. Check all the queues and counters.
+ 		 */
+ 		if (drain_all) {
+ 			drain |= !list_empty(&q->queue_head);
+ 			for (i = 0; i < 2; i++) {
+ 				drain |= q->rq.count[i];
+ 				drain |= q->in_flight[i];
+ 				drain |= !list_empty(&q->flush_queue[i]);
+ 			}
+ 		}
  
  		spin_unlock_irq(q->queue_lock);
@@@ -474,18 -485,13 +492,14 @@@ struct request_queue *blk_alloc_queue_n
  	q->backing_dev_info.state = 0;
  	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
  	q->backing_dev_info.name = "block";
 +	q->node = node_id;
  
  	err = bdi_init(&q->backing_dev_info);
- 	if (err) {
- 		kmem_cache_free(blk_requestq_cachep, q);
- 		return NULL;
- 	}
+ 	if (err)
+ 		goto fail_id;
  
- 	if (blk_throtl_init(q)) {
- 		kmem_cache_free(blk_requestq_cachep, q);
- 		return NULL;
- 	}
+ 	if (blk_throtl_init(q))
+ 		goto fail_id;
  
  	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
  		    laptop_mode_timer_fn, (unsigned long) q);
@@@ -603,16 -625,16 +624,16 @@@ blk_init_allocated_queue(struct request
  	return NULL;
  }
 -EXPORT_SYMBOL(blk_init_allocated_queue_node);
 +EXPORT_SYMBOL(blk_init_allocated_queue);
  
- int blk_get_queue(struct request_queue *q)
+ bool blk_get_queue(struct request_queue *q)
  {
- 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
- 		kobject_get(&q->kobj);
- 		return 0;
+ 	if (likely(!blk_queue_dead(q))) {
+ 		__blk_get_queue(q);
+ 		return true;
  	}
  
- 	return 1;
+ 	return false;
  }
  
  EXPORT_SYMBOL(blk_get_queue);
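
A note on the blk-cgroup.c resolution above: the block branch still modified
the old per-task ->attach_task() hook, while mainline had already moved the
blkio controller to the cgroup taskset API, so the get_task_io_context()
logic is re-applied inside the taskset walk. A minimal sketch of that
conversion pattern, with hypothetical example_* names (only
cgroup_taskset_for_each() and the 3.3-era callback signature are taken from
the diff itself):

/* old style: the cgroup core invoked this once per migrating task */
static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-task body */
}

/* new style: invoked once per migration; walks the taskset itself */
static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* former per-task body moves here, applied to each task */
	}
}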
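
For context on the blk_drain_queue() hunk: the drain |= accumulation only
makes sense inside the surrounding retry loop, which polls until no tracked
queue or counter reports pending work. Roughly, per the 3.3-era source
(abridged; the loop shape and msleep() interval come from that tree, not
from this diff):

void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	while (true) {
		bool drain = false;
		int i;

		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
		/*
		 * ... the hunk above: kick the queue if non-empty, then
		 * OR every request counter and pending list into 'drain'
		 */
		spin_unlock_irq(q->queue_lock);

		if (!drain)
			break;
		msleep(10);	/* back off, then re-check all counters */
	}
}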
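
Finally, note the calling-convention flip in the last hunk: the old
int blk_get_queue() returned 0 on success, while the new bool version
returns true once a reference has actually been taken. A hypothetical
caller (try_hold_queue() is illustrative, not from the tree) now reads:

static struct request_queue *try_hold_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))	/* queue already dead, no ref taken */
		return NULL;

	return q;		/* release later with blk_put_queue() */
}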