block, bfq: split bfq-iosched.c into multiple source files
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index deb1f21c535fddb180d4f896063f0798cd18776f..6d14f18c0d45a9188068d8de39ec748d6e1c042e 100644
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 #include "blk-mq-sched.h"
-#include <linux/blktrace_api.h>
-#include <linux/hrtimer.h>
-#include <linux/blk-cgroup.h>
-
-#define BFQ_IOPRIO_CLASSES     3
-#define BFQ_CL_IDLE_TIMEOUT    (HZ/5)
-
-#define BFQ_MIN_WEIGHT                 1
-#define BFQ_MAX_WEIGHT                 1000
-#define BFQ_WEIGHT_CONVERSION_COEFF    10
-
-#define BFQ_DEFAULT_QUEUE_IOPRIO       4
-
-#define BFQ_WEIGHT_LEGACY_DFL  100
-#define BFQ_DEFAULT_GRP_IOPRIO 0
-#define BFQ_DEFAULT_GRP_CLASS  IOPRIO_CLASS_BE
-
-/*
- * Soft real-time applications are far more latency-sensitive than
- * interactive ones. Over-raise the weight of the former to privilege
- * them over the latter.
- */
-#define BFQ_SOFTRT_WEIGHT_FACTOR       100
-
-struct bfq_entity;
-
-/**
- * struct bfq_service_tree - per ioprio_class service tree.
- *
- * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
- * ioprio_class has its own independent scheduler, and so its own
- * bfq_service_tree.  All the fields are protected by the queue lock
- * of the containing bfqd.
- */
-struct bfq_service_tree {
-       /* tree for active entities (i.e., those backlogged) */
-       struct rb_root active;
-       /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
-       struct rb_root idle;
-
-       /* idle entity with minimum F_i */
-       struct bfq_entity *first_idle;
-       /* idle entity with maximum F_i */
-       struct bfq_entity *last_idle;
-
-       /* scheduler virtual time */
-       u64 vtime;
-       /* scheduler weight sum; active and idle entities contribute to it */
-       unsigned long wsum;
-};
-
-/**
- * struct bfq_sched_data - multi-class scheduler.
- *
- * bfq_sched_data is the basic scheduler queue.  It supports three
- * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
- *
- * The supported ioprio_classes are the same as in CFQ, in descending
- * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
- * Requests from higher priority queues are served before all the
- * requests from lower priority queues; among requests of the same
- * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
- */
-struct bfq_sched_data {
-       /* entity in service */
-       struct bfq_entity *in_service_entity;
-       /* head-of-line entity (see comments above) */
-       struct bfq_entity *next_in_service;
-       /* array of service trees, one per ioprio_class */
-       struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
-       /* last time CLASS_IDLE was served */
-       unsigned long bfq_class_idle_last_service;
-
-};
-
-/**
- * struct bfq_entity - schedulable entity.
- *
- * A bfq_entity is used to represent either a bfq_queue (leaf node in the
- * cgroup hierarchy) or a bfq_group into the upper level scheduler.  Each
- * entity belongs to the sched_data of the parent group in the cgroup
- * hierarchy.  Non-leaf entities have also their own sched_data, stored
- * in @my_sched_data.
- *
- * Each entity stores independently its priority values; this would
- * allow different weights on different devices, but this
- * functionality is not exported to userspace by now.  Priorities and
- * weights are updated lazily, first storing the new values into the
- * new_* fields, then setting the @prio_changed flag.  As soon as
- * there is a transition in the entity state that allows the priority
- * update to take place the effective and the requested priority
- * values are synchronized.
- *
- * Unless cgroups are used, the weight value is calculated from the
- * ioprio to export the same interface as CFQ.  When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
- * time consuming their budget and have true sequential behavior, and
- * when there are no external factors breaking anticipation) the
- * relative weights at each level of the cgroups hierarchy should be
- * guaranteed.  All the fields are protected by the queue lock of the
- * containing bfqd.
- */
-struct bfq_entity {
-       /* service_tree member */
-       struct rb_node rb_node;
-
-       /*
-        * Flag, true if the entity is on a tree (either the active or
-        * the idle one of its service_tree) or is in service.
-        */
-       bool on_st;
-
-       /* B-WF2Q+ start and finish timestamps [sectors/weight] */
-       u64 start, finish;
-
-       /* tree the entity is enqueued into; %NULL if not on a tree */
-       struct rb_root *tree;
-
-       /*
-        * minimum start time of the (active) subtree rooted at this
-        * entity; used for O(log N) lookups into active trees
-        */
-       u64 min_start;
-
-       /* amount of service received during the last service slot */
-       int service;
-
-       /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
-       int budget;
-
-       /* weight of the queue */
-       int weight;
-       /* next weight if a change is in progress */
-       int new_weight;
-
-       /* original weight, used to implement weight boosting */
-       int orig_weight;
-
-       /* parent entity, for hierarchical scheduling */
-       struct bfq_entity *parent;
-
-       /*
-        * For non-leaf nodes in the hierarchy, the associated
-        * scheduler queue, %NULL on leaf nodes.
-        */
-       struct bfq_sched_data *my_sched_data;
-       /* the scheduler queue this entity belongs to */
-       struct bfq_sched_data *sched_data;
-
-       /* flag, set to request a weight, ioprio or ioprio_class change  */
-       int prio_changed;
-};
-
-struct bfq_group;
-
-/**
- * struct bfq_ttime - per process thinktime stats.
- */
-struct bfq_ttime {
-       /* completion time of the last request */
-       u64 last_end_request;
-
-       /* total process thinktime */
-       u64 ttime_total;
-       /* number of thinktime samples */
-       unsigned long ttime_samples;
-       /* average process thinktime */
-       u64 ttime_mean;
-};
-
-/**
- * struct bfq_queue - leaf schedulable entity.
- *
- * A bfq_queue is a leaf request queue; it can be associated with one
- * or more io_contexts, if it is async. @cgroup holds a reference to
- * the cgroup, to be sure that it does not disappear while a bfqq
- * still references it (mostly to avoid races between request issuing
- * and task migration followed by cgroup destruction).  All the fields
- * are protected by the queue lock of the containing bfqd.
- */
-struct bfq_queue {
-       /* reference counter */
-       int ref;
-       /* parent bfq_data */
-       struct bfq_data *bfqd;
-
-       /* current ioprio and ioprio class */
-       unsigned short ioprio, ioprio_class;
-       /* next ioprio and ioprio class if a change is in progress */
-       unsigned short new_ioprio, new_ioprio_class;
-
-       /* sorted list of pending requests */
-       struct rb_root sort_list;
-       /* if fifo isn't expired, next request to serve */
-       struct request *next_rq;
-       /* number of sync and async requests queued */
-       int queued[2];
-       /* number of requests currently allocated */
-       int allocated;
-       /* number of pending metadata requests */
-       int meta_pending;
-       /* fifo list of requests in sort_list */
-       struct list_head fifo;
-
-       /* entity representing this queue in the scheduler */
-       struct bfq_entity entity;
-
-       /* maximum budget allowed from the feedback mechanism */
-       int max_budget;
-       /* budget expiration (in jiffies) */
-       unsigned long budget_timeout;
-
-       /* number of requests on the dispatch list or inside driver */
-       int dispatched;
-
-       /* status flags */
-       unsigned long flags;
-
-       /* node for active/idle bfqq list inside parent bfqd */
-       struct list_head bfqq_list;
-
-       /* associated @bfq_ttime struct */
-       struct bfq_ttime ttime;
-
-       /* bit vector: a 1 for each seeky requests in history */
-       u32 seek_history;
-       /* position of the last request enqueued */
-       sector_t last_request_pos;
-
-       /* Number of consecutive pairs of request completion and
-        * arrival, such that the queue becomes idle after the
-        * completion, but the next request arrives within an idle
-        * time slice; used only if the queue's IO_bound flag has been
-        * cleared.
-        */
-       unsigned int requests_within_timer;
-
-       /* pid of the process owning the queue, used for logging purposes */
-       pid_t pid;
-
-       /* current maximum weight-raising time for this queue */
-       unsigned long wr_cur_max_time;
-       /*
-        * Minimum time instant such that, only if a new request is
-        * enqueued after this time instant in an idle @bfq_queue with
-        * no outstanding requests, then the task associated with the
-        * queue is deemed as soft real-time (see the comments on
-        * the function bfq_bfqq_softrt_next_start())
-        */
-       unsigned long soft_rt_next_start;
-       /*
-        * Start time of the current weight-raising period if
-        * the @bfq_queue is being weight-raised, otherwise
-        * finish time of the last weight-raising period.
-        */
-       unsigned long last_wr_start_finish;
-       /* factor by which the weight of this queue is multiplied */
-       unsigned int wr_coeff;
-       /*
-        * Time of the last transition of the @bfq_queue from idle to
-        * backlogged.
-        */
-       unsigned long last_idle_bklogged;
-       /*
-        * Cumulative service received from the @bfq_queue since the
-        * last transition from idle to backlogged.
-        */
-       unsigned long service_from_backlogged;
-       /*
-        * Value of wr start time when switching to soft rt
-        */
-       unsigned long wr_start_at_switch_to_srt;
-};
-
-/**
- * struct bfq_io_cq - per (request_queue, io_context) structure.
- */
-struct bfq_io_cq {
-       /* associated io_cq structure */
-       struct io_cq icq; /* must be the first member */
-       /* array of two process queues, the sync and the async */
-       struct bfq_queue *bfqq[2];
-       /* per (request_queue, blkcg) ioprio */
-       int ioprio;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       uint64_t blkcg_serial_nr; /* the current blkcg serial */
-#endif
-};
-
-enum bfq_device_speed {
-       BFQ_BFQD_FAST,
-       BFQ_BFQD_SLOW,
-};
-
-/**
- * struct bfq_data - per-device data structure.
- *
- * All the fields are protected by @lock.
- */
-struct bfq_data {
-       /* device request queue */
-       struct request_queue *queue;
-       /* dispatch queue */
-       struct list_head dispatch;
-
-       /* root bfq_group for the device */
-       struct bfq_group *root_group;
-
-       /*
-        * Number of bfq_queues containing requests (including the
-        * queue in service, even if it is idling).
-        */
-       int busy_queues;
-       /* number of weight-raised busy @bfq_queues */
-       int wr_busy_queues;
-       /* number of queued requests */
-       int queued;
-       /* number of requests dispatched and waiting for completion */
-       int rq_in_driver;
-
-       /*
-        * Maximum number of requests in driver in the last
-        * @hw_tag_samples completed requests.
-        */
-       int max_rq_in_driver;
-       /* number of samples used to calculate hw_tag */
-       int hw_tag_samples;
-       /* flag set to one if the driver is showing a queueing behavior */
-       int hw_tag;
-
-       /* number of budgets assigned */
-       int budgets_assigned;
-
-       /*
-        * Timer set when idling (waiting) for the next request from
-        * the queue in service.
-        */
-       struct hrtimer idle_slice_timer;
-
-       /* bfq_queue in service */
-       struct bfq_queue *in_service_queue;
-       /* bfq_io_cq (bic) associated with the @in_service_queue */
-       struct bfq_io_cq *in_service_bic;
-
-       /* on-disk position of the last served request */
-       sector_t last_position;
-
-       /* time of last request completion (ns) */
-       u64 last_completion;
-
-       /* time of first rq dispatch in current observation interval (ns) */
-       u64 first_dispatch;
-       /* time of last rq dispatch in current observation interval (ns) */
-       u64 last_dispatch;
-
-       /* beginning of the last budget */
-       ktime_t last_budget_start;
-       /* beginning of the last idle slice */
-       ktime_t last_idling_start;
-
-       /* number of samples in current observation interval */
-       int peak_rate_samples;
-       /* num of samples of seq dispatches in current observation interval */
-       u32 sequential_samples;
-       /* total num of sectors transferred in current observation interval */
-       u64 tot_sectors_dispatched;
-       /* max rq size seen during current observation interval (sectors) */
-       u32 last_rq_max_size;
-       /* time elapsed from first dispatch in current observ. interval (us) */
-       u64 delta_from_first;
-       /*
-        * Current estimate of the device peak rate, measured in
-        * [BFQ_RATE_SHIFT * sectors/usec]. The left-shift by
-        * BFQ_RATE_SHIFT is performed to increase precision in
-        * fixed-point calculations.
-        */
-       u32 peak_rate;
-
-       /* maximum budget allotted to a bfq_queue before rescheduling */
-       int bfq_max_budget;
-
-       /* list of all the bfq_queues active on the device */
-       struct list_head active_list;
-       /* list of all the bfq_queues idle on the device */
-       struct list_head idle_list;
-
-       /*
-        * Timeout for async/sync requests; when it fires, requests
-        * are served in fifo order.
-        */
-       u64 bfq_fifo_expire[2];
-       /* weight of backward seeks wrt forward ones */
-       unsigned int bfq_back_penalty;
-       /* maximum allowed backward seek */
-       unsigned int bfq_back_max;
-       /* maximum idling time */
-       u32 bfq_slice_idle;
-
-       /* user-configured max budget value (0 for auto-tuning) */
-       int bfq_user_max_budget;
-       /*
-        * Timeout for bfq_queues to consume their budget; used to
-        * prevent seeky queues from imposing long latencies to
-        * sequential or quasi-sequential ones (this also implies that
-        * seeky queues cannot receive guarantees in the service
-        * domain; after a timeout they are charged for the time they
-        * have been in service, to preserve fairness among them, but
-        * without service-domain guarantees).
-        */
-       unsigned int bfq_timeout;
-
-       /*
-        * Number of consecutive requests that must be issued within
-        * the idle time slice to set again idling to a queue which
-        * was marked as non-I/O-bound (see the definition of the
-        * IO_bound flag for further details).
-        */
-       unsigned int bfq_requests_within_timer;
-
-       /*
-        * Force device idling whenever needed to provide accurate
-        * service guarantees, without caring about throughput
-        * issues. CAVEAT: this may even increase latencies, in case
-        * of useless idling for processes that did stop doing I/O.
-        */
-       bool strict_guarantees;
-
-       /* if set to true, low-latency heuristics are enabled */
-       bool low_latency;
-       /*
-        * Maximum factor by which the weight of a weight-raised queue
-        * is multiplied.
-        */
-       unsigned int bfq_wr_coeff;
-       /* maximum duration of a weight-raising period (jiffies) */
-       unsigned int bfq_wr_max_time;
-
-       /* Maximum weight-raising duration for soft real-time processes */
-       unsigned int bfq_wr_rt_max_time;
-       /*
-        * Minimum idle period after which weight-raising may be
-        * reactivated for a queue (in jiffies).
-        */
-       unsigned int bfq_wr_min_idle_time;
-       /*
-        * Minimum period between request arrivals after which
-        * weight-raising may be reactivated for an already busy async
-        * queue (in jiffies).
-        */
-       unsigned long bfq_wr_min_inter_arr_async;
-
-       /* Max service-rate for a soft real-time queue, in sectors/sec */
-       unsigned int bfq_wr_max_softrt_rate;
-       /*
-        * Cached value of the product R*T, used for computing the
-        * maximum duration of weight raising automatically.
-        */
-       u64 RT_prod;
-       /* device-speed class for the low-latency heuristic */
-       enum bfq_device_speed device_speed;
-
-       /* fallback dummy bfqq for extreme OOM conditions */
-       struct bfq_queue oom_bfqq;
-
-       spinlock_t lock;
-
-       /*
-        * bic associated with the task issuing current bio for
-        * merging. This and the next field are used as a support to
-        * be able to perform the bic lookup, needed by bio-merge
-        * functions, before the scheduler lock is taken, and thus
-        * avoid taking the request-queue lock while the scheduler
-        * lock is being held.
-        */
-       struct bfq_io_cq *bio_bic;
-       /* bfqq associated with the task issuing current bio for merging */
-       struct bfq_queue *bio_bfqq;
-};
-
-enum bfqq_state_flags {
-       BFQQF_busy = 0,         /* has requests or is in service */
-       BFQQF_wait_request,     /* waiting for a request */
-       BFQQF_non_blocking_wait_rq, /*
-                                    * waiting for a request
-                                    * without idling the device
-                                    */
-       BFQQF_fifo_expire,      /* FIFO checked in this slice */
-       BFQQF_idle_window,      /* slice idling enabled */
-       BFQQF_sync,             /* synchronous queue */
-       BFQQF_IO_bound,         /*
-                                * bfqq has timed-out at least once
-                                * having consumed at most 2/10 of
-                                * its budget
-                                */
-       BFQQF_softrt_update,    /*
-                                * may need softrt-next-start
-                                * update
-                                */
-};
+#include "bfq-iosched.h"
 
 #define BFQ_BFQQ_FNS(name)                                             \
-static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)               \
+void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)                      \
 {                                                                      \
        __set_bit(BFQQF_##name, &(bfqq)->flags);                        \
 }                                                                      \
-static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)              \
+void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)                     \
 {                                                                      \
        __clear_bit(BFQQF_##name, &(bfqq)->flags);              \
 }                                                                      \
-static int bfq_bfqq_##name(const struct bfq_queue *bfqq)               \
+int bfq_bfqq_##name(const struct bfq_queue *bfqq)                      \
 {                                                                      \
        return test_bit(BFQQF_##name, &(bfqq)->flags);          \
 }
 
+BFQ_BFQQ_FNS(just_created);
 BFQ_BFQQ_FNS(busy);
 BFQ_BFQQ_FNS(wait_request);
 BFQ_BFQQ_FNS(non_blocking_wait_rq);
@@ -628,198 +126,11 @@ BFQ_BFQQ_FNS(fifo_expire);
 BFQ_BFQQ_FNS(idle_window);
 BFQ_BFQQ_FNS(sync);
 BFQ_BFQQ_FNS(IO_bound);
+BFQ_BFQQ_FNS(in_large_burst);
+BFQ_BFQQ_FNS(coop);
+BFQ_BFQQ_FNS(split_coop);
 BFQ_BFQQ_FNS(softrt_update);
-#undef BFQ_BFQQ_FNS
-
-/* Logging facilities. */
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
-static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
-
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
-                       bfq_bfqq_sync((bfqq)) ? 'S' : 'A',              \
-                         __pbuf, ##args);                              \
-} while (0)
-
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));          \
-       blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);    \
-} while (0)
-
-#else /* CONFIG_BFQ_GROUP_IOSCHED */
-
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
-       blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid,   \
-                       bfq_bfqq_sync((bfqq)) ? 'S' : 'A',              \
-                               ##args)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)         do {} while (0)
-
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
-
-#define bfq_log(bfqd, fmt, args...) \
-       blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
-
-/* Expiration reasons. */
-enum bfqq_expiration {
-       BFQQE_TOO_IDLE = 0,             /*
-                                        * queue has been idling for
-                                        * too long
-                                        */
-       BFQQE_BUDGET_TIMEOUT,   /* budget took too long to be used */
-       BFQQE_BUDGET_EXHAUSTED, /* budget consumed */
-       BFQQE_NO_MORE_REQUESTS, /* the queue has no more requests */
-       BFQQE_PREEMPTED         /* preemption in progress */
-};
-
-struct bfqg_stats {
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       /* number of ios merged */
-       struct blkg_rwstat              merged;
-       /* total time spent on device in ns, may not be accurate w/ queueing */
-       struct blkg_rwstat              service_time;
-       /* total time spent waiting in scheduler queue in ns */
-       struct blkg_rwstat              wait_time;
-       /* number of IOs queued up */
-       struct blkg_rwstat              queued;
-       /* total disk time and nr sectors dispatched by this group */
-       struct blkg_stat                time;
-       /* sum of number of ios queued across all samples */
-       struct blkg_stat                avg_queue_size_sum;
-       /* count of samples taken for average */
-       struct blkg_stat                avg_queue_size_samples;
-       /* how many times this group has been removed from service tree */
-       struct blkg_stat                dequeue;
-       /* total time spent waiting for it to be assigned a timeslice. */
-       struct blkg_stat                group_wait_time;
-       /* time spent idling for this blkcg_gq */
-       struct blkg_stat                idle_time;
-       /* total time with empty current active q with other requests queued */
-       struct blkg_stat                empty_time;
-       /* fields after this shouldn't be cleared on stat reset */
-       uint64_t                        start_group_wait_time;
-       uint64_t                        start_idle_time;
-       uint64_t                        start_empty_time;
-       uint16_t                        flags;
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
-};
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-
-/*
- * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
- *
- * @ps: @blkcg_policy_storage that this structure inherits
- * @weight: weight of the bfq_group
- */
-struct bfq_group_data {
-       /* must be the first member */
-       struct blkcg_policy_data pd;
-
-       unsigned int weight;
-};
-
-/**
- * struct bfq_group - per (device, cgroup) data structure.
- * @entity: schedulable entity to insert into the parent group sched_data.
- * @sched_data: own sched_data, to contain child entities (they may be
- *              both bfq_queues and bfq_groups).
- * @bfqd: the bfq_data for the device this group acts upon.
- * @async_bfqq: array of async queues for all the tasks belonging to
- *              the group, one queue per ioprio value per ioprio_class,
- *              except for the idle class that has only one queue.
- * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
- * @my_entity: pointer to @entity, %NULL for the toplevel group; used
- *             to avoid too many special cases during group creation/
- *             migration.
- * @stats: stats for this bfqg.
- *
- * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
- * there is a set of bfq_groups, each one collecting the lower-level
- * entities belonging to the group that are acting on the same device.
- *
- * Locking works as follows:
- *    o @bfqd is protected by the queue lock, RCU is used to access it
- *      from the readers.
- *    o All the other fields are protected by the @bfqd queue lock.
- */
-struct bfq_group {
-       /* must be the first member */
-       struct blkg_policy_data pd;
-
-       struct bfq_entity entity;
-       struct bfq_sched_data sched_data;
-
-       void *bfqd;
-
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-       struct bfq_queue *async_idle_bfqq;
-
-       struct bfq_entity *my_entity;
-
-       struct bfqg_stats stats;
-};
-
-#else
-struct bfq_group {
-       struct bfq_sched_data sched_data;
-
-       struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
-       struct bfq_queue *async_idle_bfqq;
-
-       struct rb_root rq_pos_tree;
-};
-#endif
-
-static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
-
-static unsigned int bfq_class_idx(struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       return bfqq ? bfqq->ioprio_class - 1 :
-               BFQ_DEFAULT_GRP_CLASS - 1;
-}
-
-static struct bfq_service_tree *
-bfq_entity_service_tree(struct bfq_entity *entity)
-{
-       struct bfq_sched_data *sched_data = entity->sched_data;
-       unsigned int idx = bfq_class_idx(entity);
-
-       return sched_data->service_tree + idx;
-}
-
-static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
-{
-       return bic->bfqq[is_sync];
-}
-
-static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
-                        bool is_sync)
-{
-       bic->bfqq[is_sync] = bfqq;
-}
-
-static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
-{
-       return bic->icq.q->elevator->elevator_data;
-}
-
-static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
-static void bfq_put_queue(struct bfq_queue *bfqq);
-static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
-                                      struct bio *bio, bool is_sync,
-                                      struct bfq_io_cq *bic);
-static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
-                                   struct bfq_group *bfqg);
-static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
-static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+#undef BFQ_BFQQ_FNS
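
For reference, an illustrative expansion of BFQ_BFQQ_FNS(busy) (a sketch, not part of the patch): the macro emits three small helpers that set, clear and test the corresponding BFQQF_ bit in bfqq->flags, roughly:

void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
{
	__set_bit(BFQQF_busy, &bfqq->flags);
}

void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
{
	__clear_bit(BFQQF_busy, &bfqq->flags);
}

int bfq_bfqq_busy(const struct bfq_queue *bfqq)
{
	return test_bit(BFQQF_busy, &bfqq->flags);
}

The flag names added by this patch (just_created, in_large_burst, coop, split_coop) are new entries of enum bfqq_state_flags, which now lives in the bfq-iosched.h header included above.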
 
 /* Expiration time of sync (0) and async (1) requests, in ns. */
 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
@@ -847,7 +158,7 @@ static const int bfq_default_max_budget = 16 * 1024;
 static const int bfq_async_charge_factor = 10;
 
 /* Default timeout values, in jiffies, approximating CFQ defaults. */
-static const int bfq_timeout = HZ / 8;
+const int bfq_timeout = HZ / 8;
 
 static struct kmem_cache *bfq_pool;
 
@@ -916,2721 +227,76 @@ static int R_slow[2] = {1000, 10700};
 static int R_fast[2] = {14000, 33000};
 /*
  * To improve readability, a conversion function is used to initialize the
- * following arrays, which entails that they can be initialized only in a
- * function.
- */
-static int T_slow[2];
-static int T_fast[2];
-static int device_speed_thresh[2];
-
-#define BFQ_SERVICE_TREE_INIT  ((struct bfq_service_tree)              \
-                               { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
-
-#define RQ_BIC(rq)             ((struct bfq_io_cq *) (rq)->elv.priv[0])
-#define RQ_BFQQ(rq)            ((rq)->elv.priv[1])
-
-/**
- * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
- * @icq: the iocontext queue.
- */
-static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
-{
-       /* bic->icq is the first member, %NULL will convert to %NULL */
-       return container_of(icq, struct bfq_io_cq, icq);
-}
-
-/**
- * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
- * @bfqd: the lookup key.
- * @ioc: the io_context of the process doing I/O.
- * @q: the request queue.
- */
-static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
-                                       struct io_context *ioc,
-                                       struct request_queue *q)
-{
-       if (ioc) {
-               unsigned long flags;
-               struct bfq_io_cq *icq;
-
-               spin_lock_irqsave(q->queue_lock, flags);
-               icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-               spin_unlock_irqrestore(q->queue_lock, flags);
-
-               return icq;
-       }
-
-       return NULL;
-}
-
-/*
- * Scheduler run of queue, if there are requests pending and no one in the
- * driver that will restart queueing.
- */
-static void bfq_schedule_dispatch(struct bfq_data *bfqd)
-{
-       if (bfqd->queued != 0) {
-               bfq_log(bfqd, "schedule dispatch");
-               blk_mq_run_hw_queues(bfqd->queue, true);
-       }
-}
-
-/**
- * bfq_gt - compare two timestamps.
- * @a: first ts.
- * @b: second ts.
- *
- * Return @a > @b, dealing with wrapping correctly.
- */
-static int bfq_gt(u64 a, u64 b)
-{
-       return (s64)(a - b) > 0;
-}
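
A minimal userspace sketch of why this signed-difference comparison is wraparound-safe (illustrative values, not from the patch); the rule holds whenever the two timestamps are less than 2^63 apart:

#include <stdint.h>
#include <stdio.h>

/* Same rule as bfq_gt(): compare timestamps via their signed difference. */
static int gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

int main(void)
{
	uint64_t b = UINT64_MAX - 9;	/* timestamp taken just before wraparound */
	uint64_t a = 5;			/* 15 units later, after wrapping past 0 */

	/* Prints 1: a is correctly seen as the later timestamp,
	 * even though a < b as plain unsigned values.
	 */
	printf("%d\n", gt(a, b));
	return 0;
}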
-
-static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
-{
-       struct rb_node *node = tree->rb_node;
-
-       return rb_entry(node, struct bfq_entity, rb_node);
-}
-
-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
-
-static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
-
-/**
- * bfq_update_next_in_service - update sd->next_in_service
- * @sd: sched_data for which to perform the update.
- * @new_entity: if not NULL, pointer to the entity whose activation,
- *             requeueing or repositioning triggered the invocation of
- *             this function.
- *
- * This function is called to update sd->next_in_service, which, in
- * its turn, may change as a consequence of the insertion or
- * extraction of an entity into/from one of the active trees of
- * sd. These insertions/extractions occur as a consequence of
- * activations/deactivations of entities, with some activations being
- * 'true' activations, and other activations being requeueings (i.e.,
- * implementing the second, requeueing phase of the mechanism used to
- * reposition an entity in its active tree; see comments on
- * __bfq_activate_entity and __bfq_requeue_entity for details). In
- * both the last two activation sub-cases, new_entity points to the
- * just activated or requeued entity.
- *
- * Returns true if sd->next_in_service changes in such a way that
- * entity->parent may become the next_in_service for its parent
- * entity.
- */
-static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
-                                      struct bfq_entity *new_entity)
-{
-       struct bfq_entity *next_in_service = sd->next_in_service;
-       bool parent_sched_may_change = false;
-
-       /*
-        * If this update is triggered by the activation, requeueing
-        * or repositioning of an entity that does not coincide with
-        * sd->next_in_service, then a full lookup in the active tree
-        * can be avoided. In fact, it is enough to check whether the
-        * just-modified entity has a higher priority than
-        * sd->next_in_service, or, even if it has the same priority
-        * as sd->next_in_service, is eligible and has a lower virtual
-        * finish time than sd->next_in_service. If this compound
-        * condition holds, then the new entity becomes the new
-        * next_in_service. Otherwise no change is needed.
-        */
-       if (new_entity && new_entity != sd->next_in_service) {
-               /*
-                * Flag used to decide whether to replace
-                * sd->next_in_service with new_entity. Tentatively
-                * set to true, and left as true if
-                * sd->next_in_service is NULL.
-                */
-               bool replace_next = true;
-
-               /*
-                * If there is already a next_in_service candidate
-                * entity, then compare class priorities or timestamps
-                * to decide whether to replace sd->next_in_service with
-                * new_entity.
-                */
-               if (next_in_service) {
-                       unsigned int new_entity_class_idx =
-                               bfq_class_idx(new_entity);
-                       struct bfq_service_tree *st =
-                               sd->service_tree + new_entity_class_idx;
-
-                       /*
-                        * For efficiency, evaluate the most likely
-                        * sub-condition first.
-                        */
-                       replace_next =
-                               (new_entity_class_idx ==
-                                bfq_class_idx(next_in_service)
-                                &&
-                                !bfq_gt(new_entity->start, st->vtime)
-                                &&
-                                bfq_gt(next_in_service->finish,
-                                       new_entity->finish))
-                               ||
-                               new_entity_class_idx <
-                               bfq_class_idx(next_in_service);
-               }
-
-               if (replace_next)
-                       next_in_service = new_entity;
-       } else /* invoked because of a deactivation: lookup needed */
-               next_in_service = bfq_lookup_next_entity(sd);
-
-       if (next_in_service) {
-               parent_sched_may_change = !sd->next_in_service ||
-                       bfq_update_parent_budget(next_in_service);
-       }
-
-       sd->next_in_service = next_in_service;
-
-       if (!next_in_service)
-               return parent_sched_may_change;
-
-       return parent_sched_may_change;
-}
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-/* both next loops stop at one of the child entities of the root group */
-#define for_each_entity(entity)        \
-       for (; entity ; entity = entity->parent)
-
-/*
- * For each iteration, compute parent in advance, so as to be safe if
- * entity is deallocated during the iteration. Such a deallocation may
- * happen as a consequence of a bfq_put_queue that frees the bfq_queue
- * containing entity.
- */
-#define for_each_entity_safe(entity, parent) \
-       for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
-
-/*
- * Returns true if this budget changes may let next_in_service->parent
- * become the next_in_service entity for its parent entity.
- */
-static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
-{
-       struct bfq_entity *bfqg_entity;
-       struct bfq_group *bfqg;
-       struct bfq_sched_data *group_sd;
-       bool ret = false;
-
-       group_sd = next_in_service->sched_data;
-
-       bfqg = container_of(group_sd, struct bfq_group, sched_data);
-       /*
-        * bfq_group's my_entity field is not NULL only if the group
-        * is not the root group. We must not touch the root entity
-        * as it must never become an in-service entity.
-        */
-       bfqg_entity = bfqg->my_entity;
-       if (bfqg_entity) {
-               if (bfqg_entity->budget > next_in_service->budget)
-                       ret = true;
-               bfqg_entity->budget = next_in_service->budget;
-       }
-
-       return ret;
-}
-
-/*
- * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
- *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
- */
-static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
-{
-       if (bfq_entity_to_bfqq(entity))
-               return true;
-
-       return false;
-}
-
-#else /* CONFIG_BFQ_GROUP_IOSCHED */
-/*
- * The next two macros are fake loops when cgroup support is not
- * enabled. In fact, in such a case, there is only one level to go up
- * (to reach the root group).
- */
-#define for_each_entity(entity)        \
-       for (; entity ; entity = NULL)
-
-#define for_each_entity_safe(entity, parent) \
-       for (parent = NULL; entity ; entity = parent)
-
-static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
-{
-       return false;
-}
-
-static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
-{
-       return true;
-}
-
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
-
-/*
- * Shift for timestamp calculations.  This actually limits the maximum
- * service allowed in one timestamp delta (small shift values increase it),
- * the maximum total weight that can be used for the queues in the system
- * (big shift values increase it), and the period of virtual time
- * wraparounds.
- */
-#define WFQ_SERVICE_SHIFT      22
-
-static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = NULL;
-
-       if (!entity->my_sched_data)
-               bfqq = container_of(entity, struct bfq_queue, entity);
-
-       return bfqq;
-}
-
-
-/**
- * bfq_delta - map service into the virtual time domain.
- * @service: amount of service.
- * @weight: scale factor (weight of an entity or weight sum).
- */
-static u64 bfq_delta(unsigned long service, unsigned long weight)
-{
-       u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-
-       do_div(d, weight);
-       return d;
-}
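
A rough userspace illustration of this scaling (arbitrary numbers, not from the patch): with WFQ_SERVICE_SHIFT equal to 22, the same amount of service advances a twice-as-heavy entity by half as much virtual time:

#include <stdint.h>
#include <stdio.h>

#define WFQ_SERVICE_SHIFT	22

/* Userspace equivalent of bfq_delta(): service scaled into the virtual
 * time domain, inversely proportional to the weight.
 */
static uint64_t delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
}

int main(void)
{
	/* Same service, different weights: the heavier entity pays half. */
	printf("%llu\n", (unsigned long long)delta(4096, 100)); /* 171798691 */
	printf("%llu\n", (unsigned long long)delta(4096, 200)); /*  85899345 */
	return 0;
}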
-
-/**
- * bfq_calc_finish - assign the finish time to an entity.
- * @entity: the entity to act upon.
- * @service: the service to be charged to the entity.
- */
-static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       entity->finish = entity->start +
-               bfq_delta(service, entity->weight);
-
-       if (bfqq) {
-               bfq_log_bfqq(bfqq->bfqd, bfqq,
-                       "calc_finish: serv %lu, w %d",
-                       service, entity->weight);
-               bfq_log_bfqq(bfqq->bfqd, bfqq,
-                       "calc_finish: start %llu, finish %llu, delta %llu",
-                       entity->start, entity->finish,
-                       bfq_delta(service, entity->weight));
-       }
-}
-
-/**
- * bfq_entity_of - get an entity from a node.
- * @node: the node field of the entity.
- *
- * Convert a node pointer to the relative entity.  This is used only
- * to simplify the logic of some functions and not as the generic
- * conversion mechanism because, e.g., in the tree walking functions,
- * the check for a %NULL value would be redundant.
- */
-static struct bfq_entity *bfq_entity_of(struct rb_node *node)
-{
-       struct bfq_entity *entity = NULL;
-
-       if (node)
-               entity = rb_entry(node, struct bfq_entity, rb_node);
-
-       return entity;
-}
-
-/**
- * bfq_extract - remove an entity from a tree.
- * @root: the tree root.
- * @entity: the entity to remove.
- */
-static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
-{
-       entity->tree = NULL;
-       rb_erase(&entity->rb_node, root);
-}
-
-/**
- * bfq_idle_extract - extract an entity from the idle tree.
- * @st: the service tree of the owning @entity.
- * @entity: the entity being removed.
- */
-static void bfq_idle_extract(struct bfq_service_tree *st,
-                            struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-       struct rb_node *next;
-
-       if (entity == st->first_idle) {
-               next = rb_next(&entity->rb_node);
-               st->first_idle = bfq_entity_of(next);
-       }
-
-       if (entity == st->last_idle) {
-               next = rb_prev(&entity->rb_node);
-               st->last_idle = bfq_entity_of(next);
-       }
-
-       bfq_extract(&st->idle, entity);
-
-       if (bfqq)
-               list_del(&bfqq->bfqq_list);
-}
-
-/**
- * bfq_insert - generic tree insertion.
- * @root: tree root.
- * @entity: entity to insert.
- *
- * This is used for the idle and the active tree, since they are both
- * ordered by finish time.
- */
-static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
-{
-       struct bfq_entity *entry;
-       struct rb_node **node = &root->rb_node;
-       struct rb_node *parent = NULL;
-
-       while (*node) {
-               parent = *node;
-               entry = rb_entry(parent, struct bfq_entity, rb_node);
-
-               if (bfq_gt(entry->finish, entity->finish))
-                       node = &parent->rb_left;
-               else
-                       node = &parent->rb_right;
-       }
-
-       rb_link_node(&entity->rb_node, parent, node);
-       rb_insert_color(&entity->rb_node, root);
-
-       entity->tree = root;
-}
-
-/**
- * bfq_update_min - update the min_start field of an entity.
- * @entity: the entity to update.
- * @node: one of its children.
- *
- * This function is called when @entity may store an invalid value for
- * min_start due to updates to the active tree.  The function  assumes
- * that the subtree rooted at @node (which may be its left or its right
- * child) has a valid min_start value.
- */
-static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
-{
-       struct bfq_entity *child;
-
-       if (node) {
-               child = rb_entry(node, struct bfq_entity, rb_node);
-               if (bfq_gt(entity->min_start, child->min_start))
-                       entity->min_start = child->min_start;
-       }
-}
-
-/**
- * bfq_update_active_node - recalculate min_start.
- * @node: the node to update.
- *
- * @node may have changed position or one of its children may have moved,
- * this function updates its min_start value.  The left and right subtrees
- * are assumed to hold a correct min_start value.
- */
-static void bfq_update_active_node(struct rb_node *node)
-{
-       struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
-
-       entity->min_start = entity->start;
-       bfq_update_min(entity, node->rb_right);
-       bfq_update_min(entity, node->rb_left);
-}
-
-/**
- * bfq_update_active_tree - update min_start for the whole active tree.
- * @node: the starting node.
- *
- * @node must be the deepest modified node after an update.  This function
- * updates its min_start using the values held by its children, assuming
- * that they did not change, and then updates all the nodes that may have
- * changed in the path to the root.  The only nodes that may have changed
- * are the ones in the path or their siblings.
- */
-static void bfq_update_active_tree(struct rb_node *node)
-{
-       struct rb_node *parent;
-
-up:
-       bfq_update_active_node(node);
-
-       parent = rb_parent(node);
-       if (!parent)
-               return;
-
-       if (node == parent->rb_left && parent->rb_right)
-               bfq_update_active_node(parent->rb_right);
-       else if (parent->rb_left)
-               bfq_update_active_node(parent->rb_left);
-
-       node = parent;
-       goto up;
-}
-
-/**
- * bfq_active_insert - insert an entity in the active tree of its
- *                     group/device.
- * @st: the service tree of the entity.
- * @entity: the entity being inserted.
- *
- * The active tree is ordered by finish time, but an extra key is kept
- * per each node, containing the minimum value for the start times of
- * its children (and the node itself), so it's possible to search for
- * the eligible node with the lowest finish time in logarithmic time.
- */
-static void bfq_active_insert(struct bfq_service_tree *st,
-                             struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-       struct rb_node *node = &entity->rb_node;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       struct bfq_sched_data *sd = NULL;
-       struct bfq_group *bfqg = NULL;
-       struct bfq_data *bfqd = NULL;
-#endif
-
-       bfq_insert(&st->active, entity);
-
-       if (node->rb_left)
-               node = node->rb_left;
-       else if (node->rb_right)
-               node = node->rb_right;
-
-       bfq_update_active_tree(node);
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       sd = entity->sched_data;
-       bfqg = container_of(sd, struct bfq_group, sched_data);
-       bfqd = (struct bfq_data *)bfqg->bfqd;
-#endif
-       if (bfqq)
-               list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
-}
-
-/**
- * bfq_ioprio_to_weight - calc a weight from an ioprio.
- * @ioprio: the ioprio value to convert.
- */
-static unsigned short bfq_ioprio_to_weight(int ioprio)
-{
-       return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
-}
-
-/**
- * bfq_weight_to_ioprio - calc an ioprio from a weight.
- * @weight: the weight value to convert.
- *
- * To preserve as much as possible the old only-ioprio user interface,
- * 0 is used as an escape ioprio value for weights (numerically) equal or
- * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
- */
-static unsigned short bfq_weight_to_ioprio(int weight)
-{
-       return max_t(int, 0,
-                    IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
-}
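
Illustrative values for this mapping (assuming IOPRIO_BE_NR is 8, as in the kernel headers, and BFQ_WEIGHT_CONVERSION_COEFF is 10, as defined earlier in this file):

#include <stdio.h>

#define IOPRIO_BE_NR			8
#define BFQ_WEIGHT_CONVERSION_COEFF	10

/* Userspace copy of bfq_ioprio_to_weight(). */
static unsigned short ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

int main(void)
{
	int ioprio;

	/* ioprio 0 (highest) -> weight 80 ... ioprio 7 (lowest) -> weight 10 */
	for (ioprio = 0; ioprio < IOPRIO_BE_NR; ioprio++)
		printf("ioprio %d -> weight %u\n",
		       ioprio, ioprio_to_weight(ioprio));
	return 0;
}

So the default ioprio 4 corresponds to weight 40, and any weight of 80 or more maps back to the escape ioprio 0 through bfq_weight_to_ioprio().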
-
-static void bfq_get_entity(struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       if (bfqq) {
-               bfqq->ref++;
-               bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
-                            bfqq, bfqq->ref);
-       }
-}
-
-/**
- * bfq_find_deepest - find the deepest node that an extraction can modify.
- * @node: the node being removed.
- *
- * Do the first step of an extraction in an rb tree, looking for the
- * node that will replace @node, and returning the deepest node that
- * the following modifications to the tree can touch.  If @node is the
- * last node in the tree return %NULL.
- */
-static struct rb_node *bfq_find_deepest(struct rb_node *node)
-{
-       struct rb_node *deepest;
-
-       if (!node->rb_right && !node->rb_left)
-               deepest = rb_parent(node);
-       else if (!node->rb_right)
-               deepest = node->rb_left;
-       else if (!node->rb_left)
-               deepest = node->rb_right;
-       else {
-               deepest = rb_next(node);
-               if (deepest->rb_right)
-                       deepest = deepest->rb_right;
-               else if (rb_parent(deepest) != node)
-                       deepest = rb_parent(deepest);
-       }
-
-       return deepest;
-}
-
-/**
- * bfq_active_extract - remove an entity from the active tree.
- * @st: the service_tree containing the tree.
- * @entity: the entity being removed.
- */
-static void bfq_active_extract(struct bfq_service_tree *st,
-                              struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-       struct rb_node *node;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       struct bfq_sched_data *sd = NULL;
-       struct bfq_group *bfqg = NULL;
-       struct bfq_data *bfqd = NULL;
-#endif
-
-       node = bfq_find_deepest(&entity->rb_node);
-       bfq_extract(&st->active, entity);
-
-       if (node)
-               bfq_update_active_tree(node);
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-       sd = entity->sched_data;
-       bfqg = container_of(sd, struct bfq_group, sched_data);
-       bfqd = (struct bfq_data *)bfqg->bfqd;
-#endif
-       if (bfqq)
-               list_del(&bfqq->bfqq_list);
-}
-
-/**
- * bfq_idle_insert - insert an entity into the idle tree.
- * @st: the service tree containing the tree.
- * @entity: the entity to insert.
- */
-static void bfq_idle_insert(struct bfq_service_tree *st,
-                           struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-       struct bfq_entity *first_idle = st->first_idle;
-       struct bfq_entity *last_idle = st->last_idle;
-
-       if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
-               st->first_idle = entity;
-       if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
-               st->last_idle = entity;
-
-       bfq_insert(&st->idle, entity);
-
-       if (bfqq)
-               list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
-}
-
-/**
- * bfq_forget_entity - do not consider entity any longer for scheduling
- * @st: the service tree.
- * @entity: the entity being removed.
- * @is_in_service: true if entity is currently the in-service entity.
- *
- * Forget everything about @entity. In addition, if entity represents
- * a queue, and the latter is not in service, then release the service
- * reference to the queue (the one taken through bfq_get_entity). In
- * fact, in this case, there is really no more service reference to
- * the queue, as the latter is also outside any service tree. If,
- * instead, the queue is in service, then __bfq_bfqd_reset_in_service
- * will take care of putting the reference when the queue finally
- * stops being served.
- */
-static void bfq_forget_entity(struct bfq_service_tree *st,
-                             struct bfq_entity *entity,
-                             bool is_in_service)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       entity->on_st = false;
-       st->wsum -= entity->weight;
-       if (bfqq && !is_in_service)
-               bfq_put_queue(bfqq);
-}
-
-/**
- * bfq_put_idle_entity - release the idle tree ref of an entity.
- * @st: service tree for the entity.
- * @entity: the entity being released.
- */
-static void bfq_put_idle_entity(struct bfq_service_tree *st,
-                               struct bfq_entity *entity)
-{
-       bfq_idle_extract(st, entity);
-       bfq_forget_entity(st, entity,
-                         entity == entity->sched_data->in_service_entity);
-}
-
-/**
- * bfq_forget_idle - update the idle tree if necessary.
- * @st: the service tree to act upon.
- *
- * To preserve the global O(log N) complexity we only remove one entry here;
- * as the idle tree will not grow indefinitely this can be done safely.
- */
-static void bfq_forget_idle(struct bfq_service_tree *st)
-{
-       struct bfq_entity *first_idle = st->first_idle;
-       struct bfq_entity *last_idle = st->last_idle;
-
-       if (RB_EMPTY_ROOT(&st->active) && last_idle &&
-           !bfq_gt(last_idle->finish, st->vtime)) {
-               /*
-                * Forget the whole idle tree, increasing the vtime past
-                * the last finish time of idle entities.
-                */
-               st->vtime = last_idle->finish;
-       }
-
-       if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
-               bfq_put_idle_entity(st, first_idle);
-}
-
-static struct bfq_service_tree *
-__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
-                               struct bfq_entity *entity)
-{
-       struct bfq_service_tree *new_st = old_st;
-
-       if (entity->prio_changed) {
-               struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-               unsigned int prev_weight, new_weight;
-               struct bfq_data *bfqd = NULL;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-               struct bfq_sched_data *sd;
-               struct bfq_group *bfqg;
-#endif
-
-               if (bfqq)
-                       bfqd = bfqq->bfqd;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-               else {
-                       sd = entity->my_sched_data;
-                       bfqg = container_of(sd, struct bfq_group, sched_data);
-                       bfqd = (struct bfq_data *)bfqg->bfqd;
-               }
-#endif
-
-               old_st->wsum -= entity->weight;
-
-               if (entity->new_weight != entity->orig_weight) {
-                       if (entity->new_weight < BFQ_MIN_WEIGHT ||
-                           entity->new_weight > BFQ_MAX_WEIGHT) {
-                               pr_crit("update_weight_prio: new_weight %d\n",
-                                       entity->new_weight);
-                               if (entity->new_weight < BFQ_MIN_WEIGHT)
-                                       entity->new_weight = BFQ_MIN_WEIGHT;
-                               else
-                                       entity->new_weight = BFQ_MAX_WEIGHT;
-                       }
-                       entity->orig_weight = entity->new_weight;
-                       if (bfqq)
-                               bfqq->ioprio =
-                                 bfq_weight_to_ioprio(entity->orig_weight);
-               }
-
-               if (bfqq)
-                       bfqq->ioprio_class = bfqq->new_ioprio_class;
-               entity->prio_changed = 0;
-
-               /*
-                * NOTE: here we may be changing the weight too early,
-                * this will cause unfairness.  The correct approach
-                * would have required additional complexity to defer
-                * weight changes to the proper time instants (i.e.,
-                * when entity->finish <= old_st->vtime).
-                */
-               new_st = bfq_entity_service_tree(entity);
-
-               prev_weight = entity->weight;
-               new_weight = entity->orig_weight *
-                            (bfqq ? bfqq->wr_coeff : 1);
-               entity->weight = new_weight;
-
-               new_st->wsum += entity->weight;
-
-               if (new_st != old_st)
-                       entity->start = new_st->vtime;
-       }
-
-       return new_st;
-}
-
-static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
-
-/**
- * bfq_bfqq_served - update the scheduler status after selection for
- *                   service.
- * @bfqq: the queue being served.
- * @served: bytes to transfer.
- *
- * NOTE: this can be optimized, as the timestamps of upper level entities
- * are synchronized every time a new bfqq is selected for service.  By now,
- * we keep it to better check consistency.
- */
-static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-       struct bfq_service_tree *st;
-
-       for_each_entity(entity) {
-               st = bfq_entity_service_tree(entity);
-
-               entity->service += served;
-
-               st->vtime += bfq_delta(served, st->wsum);
-               bfq_forget_idle(st);
-       }
-       bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
-       bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
-}
-
-/**
- * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
- *                       of the time interval during which bfqq has been in
- *                       service.
- * @bfqd: the device
- * @bfqq: the queue that needs a service update.
- * @time_ms: the amount of time during which the queue has received service
- *
- * If a queue does not consume its budget fast enough, then providing
- * the queue with service fairness may impair throughput, more or less
- * severely. For this reason, queues that consume their budget slowly
- * are provided with time fairness instead of service fairness. This
- * goal is achieved through the BFQ scheduling engine, even if such an
- * engine works in the service, and not in the time domain. The trick
- * is charging these queues with an inflated amount of service, equal
- * to the amount of service that they would have received during their
- * service slot if they had been fast, i.e., if their requests had
- * been dispatched at a rate equal to the estimated peak rate.
- *
- * It is worth noting that time fairness can cause important
- * distortions in terms of bandwidth distribution, on devices with
- * internal queueing. The reason is that I/O requests dispatched
- * during the service slot of a queue may be served after that service
- * slot is finished, and may have a total processing time loosely
- * correlated with the duration of the service slot. This is
- * especially true for short service slots.
- */
-static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-                                unsigned long time_ms)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-       int tot_serv_to_charge = entity->service;
-       unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
-
-       if (time_ms > 0 && time_ms < timeout_ms)
-               tot_serv_to_charge =
-                       (bfqd->bfq_max_budget * time_ms) / timeout_ms;
-
-       if (tot_serv_to_charge < entity->service)
-               tot_serv_to_charge = entity->service;
-
-       /* Increase budget to avoid inconsistencies */
-       if (tot_serv_to_charge > entity->budget)
-               entity->budget = tot_serv_to_charge;
-
-       bfq_bfqq_served(bfqq,
-                       max_t(int, 0, tot_serv_to_charge - entity->service));
-}
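
/*
 * Worked example of the charge computed by bfq_bfqq_charge_time()
 * above (bfq_max_budget and bfq_timeout are device- and
 * configuration-dependent, so the numbers here are assumptions): with
 * bfqd->bfq_max_budget = 16384 sectors and a budget timeout of 125 ms,
 * a queue that occupied the device for time_ms = 25 is charged
 * 16384 * 25 / 125 = 3276 sectors, i.e., the service it would have
 * received at the estimated peak rate, however little it actually
 * transferred. A stripped-down form of that computation:
 */
static inline int time_charge_sketch(int max_budget, unsigned long time_ms,
				     unsigned int timeout_ms)
{
	/* service consumed at peak rate over time_ms of device occupation */
	return (max_budget * time_ms) / timeout_ms;
}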
-
-static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
-                                       struct bfq_service_tree *st,
-                                       bool backshifted)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       st = __bfq_entity_update_weight_prio(st, entity);
-       bfq_calc_finish(entity, entity->budget);
-
-       /*
-        * If some queues enjoy backshifting for a while, then their
-        * (virtual) finish timestamps may happen to become lower and
-        * lower than the system virtual time.  In particular, if
-        * these queues often happen to be idle for short time
-        * periods, and during such time periods other queues with
-        * higher timestamps happen to be busy, then the backshifted
-        * timestamps of the former queues can become much lower than
-        * the system virtual time. In fact, to serve the queues with
-        * higher timestamps while the ones with lower timestamps are
-        * idle, the system virtual time may be pushed-up to much
-        * higher values than the finish timestamps of the idle
-        * queues. As a consequence, the finish timestamps of all new
-        * or newly activated queues may end up being much larger than
-        * those of lucky queues with backshifted timestamps. The
-        * latter queues may then monopolize the device for a lot of
-        * time. This would simply break service guarantees.
-        *
-        * To reduce this problem, push up a little bit the
-        * backshifted timestamps of the queue associated with this
-        * entity (only a queue can happen to have the backshifted
-        * flag set): just enough to let the finish timestamp of the
-        * queue be equal to the current value of the system virtual
-        * time. This may introduce a little unfairness among queues
-        * with backshifted timestamps, but it does not break
-        * worst-case fairness guarantees.
-        *
-        * As a special case, if bfqq is weight-raised, push up
-        * timestamps much less, to keep very low the probability that
-        * this push up causes the backshifted finish timestamps of
-        * weight-raised queues to become higher than the backshifted
-        * finish timestamps of non weight-raised queues.
-        */
-       if (backshifted && bfq_gt(st->vtime, entity->finish)) {
-               unsigned long delta = st->vtime - entity->finish;
-
-               if (bfqq)
-                       delta /= bfqq->wr_coeff;
-
-               entity->start += delta;
-               entity->finish += delta;
-       }
-
-       bfq_active_insert(st, entity);
-}
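
/*
 * Worked example of the push-up applied above to backshifted
 * timestamps (numbers are illustrative): with st->vtime = 1000 and
 * entity->finish = 400, a non-weight-raised queue (wr_coeff = 1) is
 * pushed up by 600, so its finish timestamp reaches the current
 * virtual time; a weight-raised queue with wr_coeff = 30 is pushed up
 * by only 600 / 30 = 20, which keeps its backshifted finish timestamp
 * well below those of non-weight-raised queues.
 */
static inline unsigned long backshift_pushup_sketch(u64 vtime, u64 finish,
						    unsigned int wr_coeff)
{
	/* lag behind the virtual time, scaled down for weight-raised queues */
	return (unsigned long)(vtime - finish) / wr_coeff;
}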
-
-/**
- * __bfq_activate_entity - handle activation of entity.
- * @entity: the entity being activated.
- * @non_blocking_wait_rq: true if entity was waiting for a request
- *
- * Called for a 'true' activation, i.e., if entity is not active and
- * one of its children receives a new request.
- *
- * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, after possibly extracting it
- * from its idle tree.
- */
-static void __bfq_activate_entity(struct bfq_entity *entity,
-                                 bool non_blocking_wait_rq)
-{
-       struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-       bool backshifted = false;
-       unsigned long long min_vstart;
-
-       /* See comments on bfq_bfqq_update_budg_for_activation */
-       if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
-               backshifted = true;
-               min_vstart = entity->finish;
-       } else
-               min_vstart = st->vtime;
-
-       if (entity->tree == &st->idle) {
-               /*
-                * Must be on the idle tree, bfq_idle_extract() will
-                * check for that.
-                */
-               bfq_idle_extract(st, entity);
-               entity->start = bfq_gt(min_vstart, entity->finish) ?
-                       min_vstart : entity->finish;
-       } else {
-               /*
-                * The finish time of the entity may be invalid, and
-                * it is in the past for sure, otherwise the queue
-                * would have been on the idle tree.
-                */
-               entity->start = min_vstart;
-               st->wsum += entity->weight;
-               /*
-                * entity is about to be inserted into a service tree,
-                * and then set in service: get a reference to make
-                * sure entity does not disappear until it is no
-                * longer in service or scheduled for service.
-                */
-               bfq_get_entity(entity);
-
-               entity->on_st = true;
-       }
-
-       bfq_update_fin_time_enqueue(entity, st, backshifted);
-}
-
-/**
- * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
- * @entity: the entity being requeued or repositioned.
- *
- * Requeueing is needed if this entity stops being served, which
- * happens if a leaf descendant entity has expired. On the other hand,
- * repositioning is needed if the next_in_service entity for the child
- * entity has changed. See the comments inside the function for
- * details.
- *
- * Basically, this function: 1) removes entity from its active tree if
- * present there, 2) updates the timestamps of entity and 3) inserts
- * entity back into its active tree (in the new, right position for
- * the new values of the timestamps).
- */
-static void __bfq_requeue_entity(struct bfq_entity *entity)
-{
-       struct bfq_sched_data *sd = entity->sched_data;
-       struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-
-       if (entity == sd->in_service_entity) {
-               /*
-                * We are requeueing the current in-service entity,
-                * which may have to be done for one of the following
-                * reasons:
-                * - entity represents the in-service queue, and the
-                *   in-service queue is being requeued after an
-                *   expiration;
-                * - entity represents a group, and its budget has
-                *   changed because one of its child entities has
-                *   just been either activated or requeued for some
-                *   reason; the timestamps of the entity need then to
-                *   be updated, and the entity needs to be enqueued
-                *   or repositioned accordingly.
-                *
-                * In particular, before requeueing, the start time of
-                * the entity must be moved forward to account for the
-                * service that the entity has received while in
-                * service. This is done by the next instructions. The
-                * finish time will then be updated according to this
-                * new value of the start time, and to the budget of
-                * the entity.
-                */
-               bfq_calc_finish(entity, entity->service);
-               entity->start = entity->finish;
-               /*
-                * In addition, if the entity had more than one child
-                * when set in service, then it was not extracted from
-                * the active tree. This implies that the position of
-                * the entity in the active tree may need to be
-                * changed now, because we have just updated the start
-                * time of the entity, and we will update its finish
-                * time in a moment (the requeueing is then, more
-                * precisely, a repositioning in this case). To
-                * implement this repositioning, we: 1) dequeue the
-                * entity here, 2) update the finish time and
-                * requeue the entity according to the new
-                * timestamps below.
-                */
-               if (entity->tree)
-                       bfq_active_extract(st, entity);
-       } else { /* The entity is already active, and not in service */
-               /*
-                * In this case, this function gets called only if the
-                * next_in_service entity below this entity has
-                * changed, and this change has caused the budget of
-                * this entity to change, which, in turn, implies that
-                * the finish time of this entity must be
-                * updated. Such an update may cause the scheduling,
-                * i.e., the position in the active tree, of this
-                * entity to change. We handle this change by: 1)
-                * dequeueing the entity here, 2) updating the finish
-                * time and requeueing the entity according to the new
-                * timestamps below. This is the same approach as the
-                * non-extracted-entity sub-case above.
-                */
-               bfq_active_extract(st, entity);
-       }
-
-       bfq_update_fin_time_enqueue(entity, st, false);
-}
-
-static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
-                                         struct bfq_sched_data *sd,
-                                         bool non_blocking_wait_rq)
-{
-       struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-
-       if (sd->in_service_entity == entity || entity->tree == &st->active)
-                /*
-                 * in service or already queued on the active tree,
-                 * requeue or reposition
-                 */
-               __bfq_requeue_entity(entity);
-       else
-               /*
-                * Not in service and not queued on its active tree:
-                * the entity is idle and this is a true activation.
-                */
-               __bfq_activate_entity(entity, non_blocking_wait_rq);
-}
-
-
-/**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *                      and activate, requeue or reposition all ancestors
- *                      for which such an update becomes necessary.
- * @entity: the entity to activate.
- * @non_blocking_wait_rq: true if this entity was waiting for a request
- * @requeue: true if this is a requeue, which implies that bfqq is
- *          being expired; thus ALL its ancestors stop being served and must
- *          therefore be requeued
- */
-static void bfq_activate_requeue_entity(struct bfq_entity *entity,
-                                       bool non_blocking_wait_rq,
-                                       bool requeue)
-{
-       struct bfq_sched_data *sd;
-
-       for_each_entity(entity) {
-               sd = entity->sched_data;
-               __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
-
-               if (!bfq_update_next_in_service(sd, entity) && !requeue)
-                       break;
-       }
-}
-
-/**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
- * @ins_into_idle_tree: if false, the entity will not be put into the
- *                     idle tree.
- *
- * Deactivates an entity, independently from its previous state.  Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
- * tree.
- */
-static bool __bfq_deactivate_entity(struct bfq_entity *entity,
-                                   bool ins_into_idle_tree)
-{
-       struct bfq_sched_data *sd = entity->sched_data;
-       struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-       int is_in_service = entity == sd->in_service_entity;
-
-       if (!entity->on_st) /* entity never activated, or already inactive */
-               return false;
-
-       if (is_in_service)
-               bfq_calc_finish(entity, entity->service);
-
-       if (entity->tree == &st->active)
-               bfq_active_extract(st, entity);
-       else if (!is_in_service && entity->tree == &st->idle)
-               bfq_idle_extract(st, entity);
-
-       if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
-               bfq_forget_entity(st, entity, is_in_service);
-       else
-               bfq_idle_insert(st, entity);
-
-       return true;
-}
-
-/**
- * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
- * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
- */
-static void bfq_deactivate_entity(struct bfq_entity *entity,
-                                 bool ins_into_idle_tree,
-                                 bool expiration)
-{
-       struct bfq_sched_data *sd;
-       struct bfq_entity *parent = NULL;
-
-       for_each_entity_safe(entity, parent) {
-               sd = entity->sched_data;
-
-               if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
-                       /*
-                        * entity is not in any tree any more, so
-                        * this deactivation is a no-op, and there is
-                        * nothing to change for upper-level entities
-                        * (in case of expiration, this can never
-                        * happen).
-                        */
-                       return;
-               }
-
-               if (sd->next_in_service == entity)
-                       /*
-                        * entity was the next_in_service entity,
-                        * then, since entity has just been
-                        * deactivated, a new one must be found.
-                        */
-                       bfq_update_next_in_service(sd, NULL);
-
-               if (sd->next_in_service)
-                       /*
-                        * The parent entity is still backlogged,
-                        * because next_in_service is not NULL. So, no
-                        * further upwards deactivation must be
-                        * performed.  Yet, next_in_service has
-                        * changed.  Then the schedule does need to be
-                        * updated upwards.
-                        */
-                       break;
-
-               /*
-                * If we get here, then the parent is no more
-                * backlogged and we need to propagate the
-                * deactivation upwards. Thus let the loop go on.
-                */
-
-               /*
-                * Also let parent be queued into the idle tree on
-                * deactivation, to preserve service guarantees, and
-                * assuming that who invoked this function does not
-                * need parent entities too to be removed completely.
-                */
-               ins_into_idle_tree = true;
-       }
-
-       /*
-        * If the deactivation loop is fully executed, then there are
-        * no more entities to touch and the next loop is not executed at
-        * all. Otherwise, requeue remaining entities if they are
-        * about to stop receiving service, or reposition them if this
-        * is not the case.
-        */
-       entity = parent;
-       for_each_entity(entity) {
-               /*
-                * Invoke __bfq_requeue_entity on entity, even if
-                * already active, to requeue/reposition it in the
-                * active tree (because sd->next_in_service has
-                * changed)
-                */
-               __bfq_requeue_entity(entity);
-
-               sd = entity->sched_data;
-               if (!bfq_update_next_in_service(sd, entity) &&
-                   !expiration)
-                       /*
-                        * next_in_service unchanged or not causing
-                        * any change in entity->parent->sd, and no
-                        * requeueing needed for expiration: stop
-                        * here.
-                        */
-                       break;
-       }
-}
-
-/**
- * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
- *                       if needed, to have at least one entity eligible.
- * @st: the service tree to act upon.
- *
- * Assumes that st is not empty.
- */
-static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
-{
-       struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
-
-       if (bfq_gt(root_entity->min_start, st->vtime))
-               return root_entity->min_start;
-
-       return st->vtime;
-}
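
/*
 * Under B-WF2Q+ an entity may be served only if it is eligible, i.e.,
 * only if its start timestamp does not exceed the system virtual time;
 * the jump computed above makes at least the minimum-start entity
 * eligible. A minimal statement of that condition (the real code uses
 * bfq_gt(), which also copes with timestamp wrap-around):
 */
static inline bool entity_is_eligible_sketch(u64 start, u64 vtime)
{
	return start <= vtime;	/* wrap-around ignored in this sketch */
}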
-
-static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
-{
-       if (new_value > st->vtime) {
-               st->vtime = new_value;
-               bfq_forget_idle(st);
-       }
-}
-
-/**
- * bfq_first_active_entity - find the eligible entity with
- *                           the smallest finish time
- * @st: the service tree to select from.
- * @vtime: the system virtual to use as a reference for eligibility
- *
- * This function searches the first schedulable entity, starting from the
- * root of the tree and going on the left every time on this side there is
- * a subtree with at least one eligible (start >= vtime) entity. The path on
- * the right is followed only if a) the left subtree contains no eligible
- * entities and b) no eligible entity has been found yet.
- */
-static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
-                                                 u64 vtime)
-{
-       struct bfq_entity *entry, *first = NULL;
-       struct rb_node *node = st->active.rb_node;
-
-       while (node) {
-               entry = rb_entry(node, struct bfq_entity, rb_node);
-left:
-               if (!bfq_gt(entry->start, vtime))
-                       first = entry;
-
-               if (node->rb_left) {
-                       entry = rb_entry(node->rb_left,
-                                        struct bfq_entity, rb_node);
-                       if (!bfq_gt(entry->min_start, vtime)) {
-                               node = node->rb_left;
-                               goto left;
-                       }
-               }
-               if (first)
-                       break;
-               node = node->rb_right;
-       }
-
-       return first;
-}
-
-/**
- * __bfq_lookup_next_entity - return the first eligible entity in @st.
- * @st: the service tree.
- *
- * If there is no in-service entity for the sched_data st belongs to,
- * then return the entity that will be set in service if:
- * 1) the parent entity this st belongs to is set in service;
- * 2) no entity belonging to such parent entity undergoes a state change
- * that would influence the timestamps of the entity (e.g., becomes idle,
- * becomes backlogged, changes its budget, ...).
- *
- * In this first case, update the virtual time in @st too (see the
- * comments on this update inside the function).
- *
- * In contrast, if there is an in-service entity, then return the
- * entity that would be set in service if not only the above
- * conditions, but also the next one held true: the currently
- * in-service entity, on expiration,
- * 1) gets a finish time equal to the current one, or
- * 2) is not eligible any more, or
- * 3) is idle.
- */
-static struct bfq_entity *
-__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
-{
-       struct bfq_entity *entity;
-       u64 new_vtime;
-
-       if (RB_EMPTY_ROOT(&st->active))
-               return NULL;
-
-       /*
-        * Get the value of the system virtual time for which at
-        * least one entity is eligible.
-        */
-       new_vtime = bfq_calc_vtime_jump(st);
-
-       /*
-        * If there is no in-service entity for the sched_data this
-        * active tree belongs to, then push the system virtual time
-        * up to the value that guarantees that at least one entity is
-        * eligible. If, instead, there is an in-service entity, then
-        * do not make any such update, because there is already an
-        * eligible entity, namely the in-service one (even if the
-        * entity is not on st, because it was extracted when set in
-        * service).
-        */
-       if (!in_service)
-               bfq_update_vtime(st, new_vtime);
-
-       entity = bfq_first_active_entity(st, new_vtime);
-
-       return entity;
-}
-
-/**
- * bfq_lookup_next_entity - return the first eligible entity in @sd.
- * @sd: the sched_data.
- *
- * This function is invoked when there has been a change in the trees
- * for sd, and we need to know what the new next entity is after this
- * change.
- */
-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
-{
-       struct bfq_service_tree *st = sd->service_tree;
-       struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
-       struct bfq_entity *entity = NULL;
-       int class_idx = 0;
-
-       /*
-        * Choose from idle class, if needed to guarantee a minimum
-        * bandwidth to this class (and if there is some active entity
-        * in idle class). This should also mitigate
-        * priority-inversion problems in case a low priority task is
-        * holding file system resources.
-        */
-       if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
-                                  BFQ_CL_IDLE_TIMEOUT)) {
-               if (!RB_EMPTY_ROOT(&idle_class_st->active))
-                       class_idx = BFQ_IOPRIO_CLASSES - 1;
-               /* About to be served if backlogged, or not yet backlogged */
-               sd->bfq_class_idle_last_service = jiffies;
-       }
-
-       /*
-        * Find the next entity to serve for the highest-priority
-        * class, unless the idle class needs to be served.
-        */
-       for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
-               entity = __bfq_lookup_next_entity(st + class_idx,
-                                                 sd->in_service_entity);
-
-               if (entity)
-                       break;
-       }
-
-       if (!entity)
-               return NULL;
-
-       return entity;
-}
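
/*
 * A condensed view of the class-selection rule implemented by
 * bfq_lookup_next_entity() above, with the scheduler state flattened
 * into plain parameters (purely illustrative): classes are scanned in
 * priority order, except that CLASS_IDLE is picked first when it is
 * backlogged and has not been served for BFQ_CL_IDLE_TIMEOUT jiffies,
 * which guarantees it a minimum share of the bandwidth and mitigates
 * priority inversion.
 */
static inline int next_class_sketch(bool idle_backlogged, bool idle_starved,
				    int first_backlogged_class)
{
	if (idle_backlogged && idle_starved)
		return BFQ_IOPRIO_CLASSES - 1;	/* serve CLASS_IDLE now */
	return first_backlogged_class;	/* highest-priority backlogged class */
}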
-
-static bool next_queue_may_preempt(struct bfq_data *bfqd)
-{
-       struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
-
-       return sd->next_in_service != sd->in_service_entity;
-}
-
-/*
- * Get next queue for service.
- */
-static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
-{
-       struct bfq_entity *entity = NULL;
-       struct bfq_sched_data *sd;
-       struct bfq_queue *bfqq;
-
-       if (bfqd->busy_queues == 0)
-               return NULL;
-
-       /*
-        * Traverse the path from the root to the leaf entity to
-        * serve. Set in service all the entities visited along the
-        * way.
-        */
-       sd = &bfqd->root_group->sched_data;
-       for (; sd ; sd = entity->my_sched_data) {
-               /*
-                * WARNING. We are about to set the in-service entity
-                * to sd->next_in_service, i.e., to the (cached) value
-                * returned by bfq_lookup_next_entity(sd) the last
-                * time it was invoked, i.e., the last time when the
-                * service order in sd changed as a consequence of the
-                * activation or deactivation of an entity. In this
-                * respect, if we execute bfq_lookup_next_entity(sd)
-                * in this very moment, it may, although with low
-                * probability, yield a different entity than that
-                * pointed to by sd->next_in_service. This rare event
-                * happens in case there was no CLASS_IDLE entity to
-                * serve for sd when bfq_lookup_next_entity(sd) was
-                * invoked for the last time, while there is now one
-                * such entity.
-                *
-                * If the above event happens, then the scheduling of
-                * such entity in CLASS_IDLE is postponed until the
-                * service of the sd->next_in_service entity
-                * finishes. In fact, when the latter is expired,
-                * bfq_lookup_next_entity(sd) gets called again,
-                * exactly to update sd->next_in_service.
-                */
-
-               /* Make next_in_service entity become in_service_entity */
-               entity = sd->next_in_service;
-               sd->in_service_entity = entity;
-
-               /*
-                * Reset the accumulator of the amount of service that
-                * the entity is about to receive.
-                */
-               entity->service = 0;
-
-               /*
-                * If entity is no longer a candidate for next
-                * service, then we extract it from its active tree,
-                * throughput in some special cases, BFQ needs to know
-                * throughput in some special case, BFQ needs to know
-                * which is the next candidate entity to serve, while
-                * there is already an entity in service. In this
-                * respect, to make it easy to compute/update the next
-                * candidate entity to serve after the current
-                * candidate has been set in service, there is a case
-                * where it is necessary to extract the current
-                * candidate from its service tree. Such a case is
-                * when the entity just set in service cannot be also
-                * a candidate for next service. Details about when
-                * this condition holds are reported in the comments
-                * on the function bfq_no_longer_next_in_service()
-                * invoked below.
-                */
-               if (bfq_no_longer_next_in_service(entity))
-                       bfq_active_extract(bfq_entity_service_tree(entity),
-                                          entity);
-
-               /*
-                * For the same reason why we may have just extracted
-                * entity from its active tree, we may need to update
-                * next_in_service for the sched_data of entity too,
-                * regardless of whether entity has been extracted.
-                * In fact, even if entity has not been extracted, a
-                * descendant entity may get extracted. Such an event
-                * would cause a change in next_in_service for the
-                * level of the descendant entity, and thus possibly
-                * back to upper levels.
-                *
-                * We cannot perform the resulting needed update
-                * before the end of this loop, because, to know which
-                * is the correct next-to-serve candidate entity for
-                * each level, we need first to find the leaf entity
-                * to set in service. In fact, only after we know
-                * which is the next-to-serve leaf entity, we can
-                * discover whether the parent entity of the leaf
-                * entity becomes the next-to-serve, and so on.
-                */
-
-       }
-
-       bfqq = bfq_entity_to_bfqq(entity);
-
-       /*
-        * We can finally update all next-to-serve entities along the
-        * path from the leaf entity just set in service to the root.
-        */
-       for_each_entity(entity) {
-               struct bfq_sched_data *sd = entity->sched_data;
-
-               if (!bfq_update_next_in_service(sd, NULL))
-                       break;
-       }
-
-       return bfqq;
-}
-
-static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
-{
-       struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
-       struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
-       struct bfq_entity *entity = in_serv_entity;
-
-       if (bfqd->in_service_bic) {
-               put_io_context(bfqd->in_service_bic->icq.ioc);
-               bfqd->in_service_bic = NULL;
-       }
-
-       bfq_clear_bfqq_wait_request(in_serv_bfqq);
-       hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
-       bfqd->in_service_queue = NULL;
-
-       /*
-        * When this function is called, all in-service entities have
-        * been properly deactivated or requeued, so we can safely
-        * execute the final step: reset in_service_entity along the
-        * path from entity to the root.
-        */
-       for_each_entity(entity)
-               entity->sched_data->in_service_entity = NULL;
-
-       /*
-        * in_serv_entity is no longer in service, so, if it is in no
-        * service tree either, then release the service reference to
-        * the queue it represents (taken with bfq_get_entity).
-        */
-       if (!in_serv_entity->on_st)
-               bfq_put_queue(in_serv_bfqq);
-}
-
-static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-                               bool ins_into_idle_tree, bool expiration)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-
-       bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
-}
-
-static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-
-       bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
-                                   false);
-       bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
-}
-
-static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-
-       bfq_activate_requeue_entity(entity, false,
-                                   bfqq == bfqd->in_service_queue);
-}
-
-static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
-
-/*
- * Called when the bfqq no longer has requests pending; remove it from
- * the service tree. As a special case, it can be invoked during an
- * expiration.
- */
-static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-                             bool expiration)
-{
-       bfq_log_bfqq(bfqd, bfqq, "del from busy");
-
-       bfq_clear_bfqq_busy(bfqq);
-
-       bfqd->busy_queues--;
-
-       if (bfqq->wr_coeff > 1)
-               bfqd->wr_busy_queues--;
-
-       bfqg_stats_update_dequeue(bfqq_group(bfqq));
-
-       bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
-}
-
-/*
- * Called when an inactive queue receives a new request.
- */
-static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
-{
-       bfq_log_bfqq(bfqd, bfqq, "add to busy");
-
-       bfq_activate_bfqq(bfqd, bfqq);
-
-       bfq_mark_bfqq_busy(bfqq);
-       bfqd->busy_queues++;
-
-       if (bfqq->wr_coeff > 1)
-               bfqd->wr_busy_queues++;
-}
-
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-
-/* bfqg stats flags */
-enum bfqg_stats_flags {
-       BFQG_stats_waiting = 0,
-       BFQG_stats_idling,
-       BFQG_stats_empty,
-};
-
-#define BFQG_FLAG_FNS(name)                                            \
-static void bfqg_stats_mark_##name(struct bfqg_stats *stats)   \
-{                                                                      \
-       stats->flags |= (1 << BFQG_stats_##name);                       \
-}                                                                      \
-static void bfqg_stats_clear_##name(struct bfqg_stats *stats)  \
-{                                                                      \
-       stats->flags &= ~(1 << BFQG_stats_##name);                      \
-}                                                                      \
-static int bfqg_stats_##name(struct bfqg_stats *stats)         \
-{                                                                      \
-       return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
-}                                                                      \
-
-BFQG_FLAG_FNS(waiting)
-BFQG_FLAG_FNS(idling)
-BFQG_FLAG_FNS(empty)
-#undef BFQG_FLAG_FNS
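
/*
 * For reference, BFQG_FLAG_FNS(waiting) above expands (modulo line
 * breaks) to the three helpers below; the idling and empty variants
 * are generated in the same way:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags |= (1 << BFQG_stats_waiting);
 *	}
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BFQG_stats_waiting);
 *	}
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{
 *		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
 *	}
 */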
-
-/* This should be called with the queue_lock held. */
-static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
-{
-       unsigned long long now;
-
-       if (!bfqg_stats_waiting(stats))
-               return;
-
-       now = sched_clock();
-       if (time_after64(now, stats->start_group_wait_time))
-               blkg_stat_add(&stats->group_wait_time,
-                             now - stats->start_group_wait_time);
-       bfqg_stats_clear_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
-                                                struct bfq_group *curr_bfqg)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-
-       if (bfqg_stats_waiting(stats))
-               return;
-       if (bfqg == curr_bfqg)
-               return;
-       stats->start_group_wait_time = sched_clock();
-       bfqg_stats_mark_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
-{
-       unsigned long long now;
-
-       if (!bfqg_stats_empty(stats))
-               return;
-
-       now = sched_clock();
-       if (time_after64(now, stats->start_empty_time))
-               blkg_stat_add(&stats->empty_time,
-                             now - stats->start_empty_time);
-       bfqg_stats_clear_empty(stats);
-}
-
-static void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
-{
-       blkg_stat_add(&bfqg->stats.dequeue, 1);
-}
-
-static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-
-       if (blkg_rwstat_total(&stats->queued))
-               return;
-
-       /*
-        * The group is already marked empty. This can happen if bfqq got a
-        * new request in the parent group and moved to this group while
-        * being added to the service tree. Just ignore the event and move on.
-        */
-       if (bfqg_stats_empty(stats))
-               return;
-
-       stats->start_empty_time = sched_clock();
-       bfqg_stats_mark_empty(stats);
-}
-
-static void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-
-       if (bfqg_stats_idling(stats)) {
-               unsigned long long now = sched_clock();
-
-               if (time_after64(now, stats->start_idle_time))
-                       blkg_stat_add(&stats->idle_time,
-                                     now - stats->start_idle_time);
-               bfqg_stats_clear_idling(stats);
-       }
-}
-
-static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-
-       stats->start_idle_time = sched_clock();
-       bfqg_stats_mark_idling(stats);
-}
-
-static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-
-       blkg_stat_add(&stats->avg_queue_size_sum,
-                     blkg_rwstat_total(&stats->queued));
-       blkg_stat_add(&stats->avg_queue_size_samples, 1);
-       bfqg_stats_update_group_wait_time(stats);
-}
-
-/*
- * blk-cgroup policy-related handlers
- * The following functions help in converting between blk-cgroup
- * internal structures and BFQ-specific structures.
- */
-
-static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
-{
-       return pd ? container_of(pd, struct bfq_group, pd) : NULL;
-}
-
-static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
-{
-       return pd_to_blkg(&bfqg->pd);
-}
-
-static struct blkcg_policy blkcg_policy_bfq;
-
-static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
-}
-
-/*
- * bfq_group handlers
- * The following functions help in navigating the bfq_group hierarchy
- * by making it possible to find the parent of a bfq_group or the
- * bfq_group associated with a bfq_queue.
- */
-
-static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
-{
-       struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
-
-       return pblkg ? blkg_to_bfqg(pblkg) : NULL;
-}
-
-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
-{
-       struct bfq_entity *group_entity = bfqq->entity.parent;
-
-       return group_entity ? container_of(group_entity, struct bfq_group,
-                                          entity) :
-                             bfqq->bfqd->root_group;
-}
-
-/*
- * The following two functions handle get and put of a bfq_group by
- * wrapping the related blk-cgroup hooks.
- */
-
-static void bfqg_get(struct bfq_group *bfqg)
-{
-       return blkg_get(bfqg_to_blkg(bfqg));
-}
-
-static void bfqg_put(struct bfq_group *bfqg)
-{
-       return blkg_put(bfqg_to_blkg(bfqg));
-}
-
-static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
-                                    struct bfq_queue *bfqq,
-                                    unsigned int op)
-{
-       blkg_rwstat_add(&bfqg->stats.queued, op, 1);
-       bfqg_stats_end_empty_time(&bfqg->stats);
-       if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
-               bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
-}
-
-static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
-{
-       blkg_rwstat_add(&bfqg->stats.queued, op, -1);
-}
-
-static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
-{
-       blkg_rwstat_add(&bfqg->stats.merged, op, 1);
-}
-
-static void bfqg_stats_update_completion(struct bfq_group *bfqg,
-                       uint64_t start_time, uint64_t io_start_time,
-                       unsigned int op)
-{
-       struct bfqg_stats *stats = &bfqg->stats;
-       unsigned long long now = sched_clock();
-
-       if (time_after64(now, io_start_time))
-               blkg_rwstat_add(&stats->service_time, op,
-                               now - io_start_time);
-       if (time_after64(io_start_time, start_time))
-               blkg_rwstat_add(&stats->wait_time, op,
-                               io_start_time - start_time);
-}
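
/*
 * Timeline recorded by bfqg_stats_update_completion() above, for a
 * request inserted at start_time, dispatched to the device at
 * io_start_time and completed at sched_clock() == now (values are
 * illustrative, in nanoseconds):
 *
 *	start_time = 1000, io_start_time = 4000, now = 9000
 *	wait_time    += io_start_time - start_time = 3000  (queued in BFQ)
 *	service_time += now - io_start_time        = 5000  (in the device)
 */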
-
-/* @stats = 0 */
-static void bfqg_stats_reset(struct bfqg_stats *stats)
-{
-       /* queued stats shouldn't be cleared */
-       blkg_rwstat_reset(&stats->merged);
-       blkg_rwstat_reset(&stats->service_time);
-       blkg_rwstat_reset(&stats->wait_time);
-       blkg_stat_reset(&stats->time);
-       blkg_stat_reset(&stats->avg_queue_size_sum);
-       blkg_stat_reset(&stats->avg_queue_size_samples);
-       blkg_stat_reset(&stats->dequeue);
-       blkg_stat_reset(&stats->group_wait_time);
-       blkg_stat_reset(&stats->idle_time);
-       blkg_stat_reset(&stats->empty_time);
-}
-
-/* @to += @from */
-static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
-{
-       if (!to || !from)
-               return;
-
-       /* queued stats shouldn't be cleared */
-       blkg_rwstat_add_aux(&to->merged, &from->merged);
-       blkg_rwstat_add_aux(&to->service_time, &from->service_time);
-       blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-       blkg_stat_add_aux(&to->time, &from->time);
-       blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
-       blkg_stat_add_aux(&to->avg_queue_size_samples,
-                         &from->avg_queue_size_samples);
-       blkg_stat_add_aux(&to->dequeue, &from->dequeue);
-       blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
-       blkg_stat_add_aux(&to->idle_time, &from->idle_time);
-       blkg_stat_add_aux(&to->empty_time, &from->empty_time);
-}
-
-/*
- * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
- * recursive stats can still account for the amount used by this bfqg after
- * it's gone.
- */
-static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
-{
-       struct bfq_group *parent;
-
-       if (!bfqg) /* root_group */
-               return;
-
-       parent = bfqg_parent(bfqg);
-
-       lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
-
-       if (unlikely(!parent))
-               return;
-
-       bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
-       bfqg_stats_reset(&bfqg->stats);
-}
-
-static void bfq_init_entity(struct bfq_entity *entity,
-                           struct bfq_group *bfqg)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       entity->weight = entity->new_weight;
-       entity->orig_weight = entity->new_weight;
-       if (bfqq) {
-               bfqq->ioprio = bfqq->new_ioprio;
-               bfqq->ioprio_class = bfqq->new_ioprio_class;
-               bfqg_get(bfqg);
-       }
-       entity->parent = bfqg->my_entity; /* NULL for root group */
-       entity->sched_data = &bfqg->sched_data;
-}
-
-static void bfqg_stats_exit(struct bfqg_stats *stats)
-{
-       blkg_rwstat_exit(&stats->merged);
-       blkg_rwstat_exit(&stats->service_time);
-       blkg_rwstat_exit(&stats->wait_time);
-       blkg_rwstat_exit(&stats->queued);
-       blkg_stat_exit(&stats->time);
-       blkg_stat_exit(&stats->avg_queue_size_sum);
-       blkg_stat_exit(&stats->avg_queue_size_samples);
-       blkg_stat_exit(&stats->dequeue);
-       blkg_stat_exit(&stats->group_wait_time);
-       blkg_stat_exit(&stats->idle_time);
-       blkg_stat_exit(&stats->empty_time);
-}
-
-static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
-{
-       if (blkg_rwstat_init(&stats->merged, gfp) ||
-           blkg_rwstat_init(&stats->service_time, gfp) ||
-           blkg_rwstat_init(&stats->wait_time, gfp) ||
-           blkg_rwstat_init(&stats->queued, gfp) ||
-           blkg_stat_init(&stats->time, gfp) ||
-           blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
-           blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
-           blkg_stat_init(&stats->dequeue, gfp) ||
-           blkg_stat_init(&stats->group_wait_time, gfp) ||
-           blkg_stat_init(&stats->idle_time, gfp) ||
-           blkg_stat_init(&stats->empty_time, gfp)) {
-               bfqg_stats_exit(stats);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
-{
-       return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
-}
-
-static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
-{
-       return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
-}
-
-static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
-{
-       struct bfq_group_data *bgd;
-
-       bgd = kzalloc(sizeof(*bgd), gfp);
-       if (!bgd)
-               return NULL;
-       return &bgd->pd;
-}
-
-static void bfq_cpd_init(struct blkcg_policy_data *cpd)
-{
-       struct bfq_group_data *d = cpd_to_bfqgd(cpd);
-
-       d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
-               CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
-}
-
-static void bfq_cpd_free(struct blkcg_policy_data *cpd)
-{
-       kfree(cpd_to_bfqgd(cpd));
-}
-
-static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
-{
-       struct bfq_group *bfqg;
-
-       bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
-       if (!bfqg)
-               return NULL;
-
-       if (bfqg_stats_init(&bfqg->stats, gfp)) {
-               kfree(bfqg);
-               return NULL;
-       }
-
-       return &bfqg->pd;
-}
-
-static void bfq_pd_init(struct blkg_policy_data *pd)
-{
-       struct blkcg_gq *blkg = pd_to_blkg(pd);
-       struct bfq_group *bfqg = blkg_to_bfqg(blkg);
-       struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
-       struct bfq_entity *entity = &bfqg->entity;
-       struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
-
-       entity->orig_weight = entity->weight = entity->new_weight = d->weight;
-       entity->my_sched_data = &bfqg->sched_data;
-       bfqg->my_entity = entity; /*
-                                  * the root_group's will be set to NULL
-                                  * in bfq_init_queue()
-                                  */
-       bfqg->bfqd = bfqd;
-}
-
-static void bfq_pd_free(struct blkg_policy_data *pd)
-{
-       struct bfq_group *bfqg = pd_to_bfqg(pd);
-
-       bfqg_stats_exit(&bfqg->stats);
-       return kfree(bfqg);
-}
-
-static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
-{
-       struct bfq_group *bfqg = pd_to_bfqg(pd);
-
-       bfqg_stats_reset(&bfqg->stats);
-}
-
-static void bfq_group_set_parent(struct bfq_group *bfqg,
-                                       struct bfq_group *parent)
-{
-       struct bfq_entity *entity;
-
-       entity = &bfqg->entity;
-       entity->parent = parent->my_entity;
-       entity->sched_data = &parent->sched_data;
-}
-
-static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
-                                        struct blkcg *blkcg)
-{
-       struct blkcg_gq *blkg;
-
-       blkg = blkg_lookup(blkcg, bfqd->queue);
-       if (likely(blkg))
-               return blkg_to_bfqg(blkg);
-       return NULL;
-}
-
-static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
-                                           struct blkcg *blkcg)
-{
-       struct bfq_group *bfqg, *parent;
-       struct bfq_entity *entity;
-
-       bfqg = bfq_lookup_bfqg(bfqd, blkcg);
-
-       if (unlikely(!bfqg))
-               return NULL;
-
-       /*
-        * Update chain of bfq_groups as we might be handling a leaf group
-        * which, along with some of its relatives, has not been hooked yet
-        * to the private hierarchy of BFQ.
-        */
-       entity = &bfqg->entity;
-       for_each_entity(entity) {
-               bfqg = container_of(entity, struct bfq_group, entity);
-               if (bfqg != bfqd->root_group) {
-                       parent = bfqg_parent(bfqg);
-                       if (!parent)
-                               parent = bfqd->root_group;
-                       bfq_group_set_parent(bfqg, parent);
-               }
-       }
-
-       return bfqg;
-}
-
-static void bfq_bfqq_expire(struct bfq_data *bfqd,
-                           struct bfq_queue *bfqq,
-                           bool compensate,
-                           enum bfqq_expiration reason);
-
-/**
- * bfq_bfqq_move - migrate @bfqq to @bfqg.
- * @bfqd: queue descriptor.
- * @bfqq: the queue to move.
- * @bfqg: the group to move to.
- *
- * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
- * it on the new one.  Avoid putting the entity on the old group idle tree.
- *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (for now this just means that we are called under
- * rcu_read_lock()).
- */
-static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-                         struct bfq_group *bfqg)
-{
-       struct bfq_entity *entity = &bfqq->entity;
-
-       /* If bfqq is empty, then bfq_bfqq_expire also invokes
-        * bfq_del_bfqq_busy, thereby removing bfqq and its entity
-        * from data structures related to current group. Otherwise we
-        * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
-        * we do below.
-        */
-       if (bfqq == bfqd->in_service_queue)
-               bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
-                               false, BFQQE_PREEMPTED);
-
-       if (bfq_bfqq_busy(bfqq))
-               bfq_deactivate_bfqq(bfqd, bfqq, false, false);
-       else if (entity->on_st)
-               bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-       bfqg_put(bfqq_group(bfqq));
-
-       /*
-        * Here we use a reference to bfqg.  We don't need a refcounter
-        * as the cgroup reference will not be dropped, so that its
-        * destroy() callback will not be invoked.
-        */
-       entity->parent = bfqg->my_entity;
-       entity->sched_data = &bfqg->sched_data;
-       bfqg_get(bfqg);
-
-       if (bfq_bfqq_busy(bfqq))
-               bfq_activate_bfqq(bfqd, bfqq);
-
-       if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
-               bfq_schedule_dispatch(bfqd);
-}
-
-/**
- * __bfq_bic_change_cgroup - move @bic to @cgroup.
- * @bfqd: the queue descriptor.
- * @bic: the bic to move.
- * @blkcg: the blk-cgroup to move to.
- *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
- *
- * NOTE: an alternative approach might have been to store the current
- * cgroup in bfqq and to get a reference to it, reducing the lookup
- * time here, at the price of slightly more complex code.
- */
-static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
-                                               struct bfq_io_cq *bic,
-                                               struct blkcg *blkcg)
-{
-       struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
-       struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
-       struct bfq_group *bfqg;
-       struct bfq_entity *entity;
-
-       bfqg = bfq_find_set_group(bfqd, blkcg);
-
-       if (unlikely(!bfqg))
-               bfqg = bfqd->root_group;
-
-       if (async_bfqq) {
-               entity = &async_bfqq->entity;
-
-               if (entity->sched_data != &bfqg->sched_data) {
-                       bic_set_bfqq(bic, NULL, 0);
-                       bfq_log_bfqq(bfqd, async_bfqq,
-                                    "bic_change_group: %p %d",
-                                    async_bfqq,
-                                    async_bfqq->ref);
-                       bfq_put_queue(async_bfqq);
-               }
-       }
-
-       if (sync_bfqq) {
-               entity = &sync_bfqq->entity;
-               if (entity->sched_data != &bfqg->sched_data)
-                       bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
-       }
-
-       return bfqg;
-}
-
-static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
-{
-       struct bfq_data *bfqd = bic_to_bfqd(bic);
-       struct bfq_group *bfqg = NULL;
-       uint64_t serial_nr;
-
-       rcu_read_lock();
-       serial_nr = bio_blkcg(bio)->css.serial_nr;
-
-       /*
-        * Check whether blkcg has changed.  The condition may trigger
-        * spuriously on a newly created bic but there's no harm.
-        */
-       if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
-               goto out;
-
-       bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
-       bic->blkcg_serial_nr = serial_nr;
-out:
-       rcu_read_unlock();
-}
-
-/**
- * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
- * @st: the service tree being flushed.
- */
-static void bfq_flush_idle_tree(struct bfq_service_tree *st)
-{
-       struct bfq_entity *entity = st->first_idle;
-
-       for (; entity ; entity = st->first_idle)
-               __bfq_deactivate_entity(entity, false);
-}
-
-/**
- * bfq_reparent_leaf_entity - move leaf entity to the root_group.
- * @bfqd: the device data structure with the root group.
- * @entity: the entity to move.
- */
-static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
-                                    struct bfq_entity *entity)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
-
-       bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
-}
-
-/**
- * bfq_reparent_active_entities - move to the root group all active
- *                                entities.
- * @bfqd: the device data structure with the root group.
- * @bfqg: the group to move from.
- * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
- */
-static void bfq_reparent_active_entities(struct bfq_data *bfqd,
-                                        struct bfq_group *bfqg,
-                                        struct bfq_service_tree *st)
-{
-       struct rb_root *active = &st->active;
-       struct bfq_entity *entity = NULL;
-
-       if (!RB_EMPTY_ROOT(&st->active))
-               entity = bfq_entity_of(rb_first(active));
-
-       for (; entity ; entity = bfq_entity_of(rb_first(active)))
-               bfq_reparent_leaf_entity(bfqd, entity);
-
-       if (bfqg->sched_data.in_service_entity)
-               bfq_reparent_leaf_entity(bfqd,
-                       bfqg->sched_data.in_service_entity);
-}
-
-/**
- * bfq_pd_offline - deactivate the entity associated with @pd,
- *                 and reparent its children entities.
- * @pd: descriptor of the policy going offline.
- *
- * blkio already grabs the queue_lock for us, so no need to use
- * RCU-based magic
- */
-static void bfq_pd_offline(struct blkg_policy_data *pd)
-{
-       struct bfq_service_tree *st;
-       struct bfq_group *bfqg = pd_to_bfqg(pd);
-       struct bfq_data *bfqd = bfqg->bfqd;
-       struct bfq_entity *entity = bfqg->my_entity;
-       unsigned long flags;
-       int i;
-
-       if (!entity) /* root group */
-               return;
-
-       spin_lock_irqsave(&bfqd->lock, flags);
-       /*
-        * Empty all service_trees belonging to this group before
-        * deactivating the group itself.
-        */
-       for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
-               st = bfqg->sched_data.service_tree + i;
-
-               /*
-                * The idle tree may still contain bfq_queues belonging
-                * to exited tasks because they never migrated to a different
-                * cgroup from the one being destroyed now.  No one else
-                * can access them so it's safe to act without any lock.
-                */
-               bfq_flush_idle_tree(st);
-
-               /*
-                * It may happen that some queues are still active
-                * (busy) upon group destruction (if the corresponding
-                * processes have been forced to terminate). We move
-                * all the leaf entities corresponding to these queues
-                * to the root_group.
-                * Also, it may happen that the group has an entity
-                * in service, which is disconnected from the active
-                * tree: it must be moved, too.
-                * There is no need to put the sync queues, as the
-                * scheduler has taken no reference.
-                */
-               bfq_reparent_active_entities(bfqd, bfqg, st);
-       }
-
-       __bfq_deactivate_entity(entity, false);
-       bfq_put_async_queues(bfqd, bfqg);
-
-       spin_unlock_irqrestore(&bfqd->lock, flags);
-       /*
-        * @blkg is going offline and will be ignored by
-        * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
-        * that they don't get lost.  If IOs complete after this point, the
-        * stats for them will be lost.  Oh well...
-        */
-       bfqg_stats_xfer_dead(bfqg);
-}
-
-static void bfq_end_wr_async(struct bfq_data *bfqd)
-{
-       struct blkcg_gq *blkg;
-
-       list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
-               struct bfq_group *bfqg = blkg_to_bfqg(blkg);
-
-               bfq_end_wr_async_queues(bfqd, bfqg);
-       }
-       bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-}
-
-static int bfq_io_show_weight(struct seq_file *sf, void *v)
-{
-       struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
-       struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
-       unsigned int val = 0;
-
-       if (bfqgd)
-               val = bfqgd->weight;
-
-       seq_printf(sf, "%u\n", val);
-
-       return 0;
-}
-
-static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
-                                   struct cftype *cftype,
-                                   u64 val)
-{
-       struct blkcg *blkcg = css_to_blkcg(css);
-       struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
-       struct blkcg_gq *blkg;
-       int ret = -ERANGE;
-
-       if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
-               return ret;
-
-       ret = 0;
-       spin_lock_irq(&blkcg->lock);
-       bfqgd->weight = (unsigned short)val;
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct bfq_group *bfqg = blkg_to_bfqg(blkg);
-
-               if (!bfqg)
-                       continue;
-               /*
-                * Setting the prio_changed flag of the entity
-                * to 1 with new_weight == weight would re-set
-                * the value of the weight to its ioprio mapping.
-                * Set the flag only if necessary.
-                */
-               if ((unsigned short)val != bfqg->entity.new_weight) {
-                       bfqg->entity.new_weight = (unsigned short)val;
-                       /*
-                        * Make sure that the above new value has been
-                        * stored in bfqg->entity.new_weight before
-                        * setting the prio_changed flag. In fact,
-                        * this flag may be read asynchronously (in
-                        * critical sections protected by a different
-                        * lock than that held here), and finding this
-                        * flag set may cause the execution of the code
-                        * for updating parameters whose value may
-                        * depend also on bfqg->entity.new_weight (in
-                        * __bfq_entity_update_weight_prio).
-                        * This barrier makes sure that the new value
-                        * of bfqg->entity.new_weight is correctly
-                        * seen in that code.
-                        */
-                       smp_wmb();
-                       bfqg->entity.prio_changed = 1;
-               }
-       }
-       spin_unlock_irq(&blkcg->lock);
-
-       return ret;
-}
-
-static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
-                                char *buf, size_t nbytes,
-                                loff_t off)
-{
-       u64 weight;
-       /* First unsigned long found in the file is used */
-       int ret = kstrtoull(strim(buf), 0, &weight);
-
-       if (ret)
-               return ret;
-
-       return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
-}
-
-static int bfqg_print_stat(struct seq_file *sf, void *v)
-{
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
-                         &blkcg_policy_bfq, seq_cft(sf)->private, false);
-       return 0;
-}
-
-static int bfqg_print_rwstat(struct seq_file *sf, void *v)
-{
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
-                         &blkcg_policy_bfq, seq_cft(sf)->private, true);
-       return 0;
-}
-
-static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
-                                     struct blkg_policy_data *pd, int off)
-{
-       u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
-                                         &blkcg_policy_bfq, off);
-       return __blkg_prfill_u64(sf, pd, sum);
-}
-
-static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
-                                       struct blkg_policy_data *pd, int off)
-{
-       struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
-                                                          &blkcg_policy_bfq,
-                                                          off);
-       return __blkg_prfill_rwstat(sf, pd, &sum);
-}
-
-static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
-{
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-                         bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
-                         seq_cft(sf)->private, false);
-       return 0;
-}
-
-static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
-{
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-                         bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
-                         seq_cft(sf)->private, true);
-       return 0;
-}
-
-static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
-                              int off)
-{
-       u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
-
-       return __blkg_prfill_u64(sf, pd, sum >> 9);
-}
+ * following arrays, which entails that they can be initialized only in a
+ * function.
+ */
+static int T_slow[2];
+static int T_fast[2];
+static int device_speed_thresh[2];
 
-static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
-{
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-                         bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
-       return 0;
-}
+#define RQ_BIC(rq)             ((struct bfq_io_cq *) (rq)->elv.priv[0])
+#define RQ_BFQQ(rq)            ((rq)->elv.priv[1])
 
-static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
-                                        struct blkg_policy_data *pd, int off)
+struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
 {
-       struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
-                                       offsetof(struct blkcg_gq, stat_bytes));
-       u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
-               atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
-
-       return __blkg_prfill_u64(sf, pd, sum >> 9);
+       return bic->bfqq[is_sync];
 }
 
-static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
+void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 {
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-                         bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
-                         false);
-       return 0;
+       bic->bfqq[is_sync] = bfqq;
 }
 
-static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
-                                     struct blkg_policy_data *pd, int off)
+struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
 {
-       struct bfq_group *bfqg = pd_to_bfqg(pd);
-       u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
-       u64 v = 0;
-
-       if (samples) {
-               v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
-               v = div64_u64(v, samples);
-       }
-       __blkg_prfill_u64(sf, pd, v);
-       return 0;
+       return bic->icq.q->elevator->elevator_data;
 }
 
-/* print avg_queue_size */
-static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
+/**
+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
+ * @icq: the iocontext queue.
+ */
+static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
 {
-       blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
-                         bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
-                         0, false);
-       return 0;
+       /* bic->icq is the first member, %NULL will convert to %NULL */
+       return container_of(icq, struct bfq_io_cq, icq);
 }
 
-static struct bfq_group *
-bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+/**
+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
+ * @bfqd: the lookup key.
+ * @ioc: the io_context of the process doing I/O.
+ * @q: the request queue.
+ */
+static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+                                       struct io_context *ioc,
+                                       struct request_queue *q)
 {
-       int ret;
-
-       ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
-       if (ret)
-               return NULL;
-
-       return blkg_to_bfqg(bfqd->queue->root_blkg);
-}
-
-static struct cftype bfq_blkcg_legacy_files[] = {
-       {
-               .name = "bfq.weight",
-               .flags = CFTYPE_NOT_ON_ROOT,
-               .seq_show = bfq_io_show_weight,
-               .write_u64 = bfq_io_set_weight_legacy,
-       },
-
-       /* statistics, covers only the tasks in the bfqg */
-       {
-               .name = "bfq.time",
-               .private = offsetof(struct bfq_group, stats.time),
-               .seq_show = bfqg_print_stat,
-       },
-       {
-               .name = "bfq.sectors",
-               .seq_show = bfqg_print_stat_sectors,
-       },
-       {
-               .name = "bfq.io_service_bytes",
-               .private = (unsigned long)&blkcg_policy_bfq,
-               .seq_show = blkg_print_stat_bytes,
-       },
-       {
-               .name = "bfq.io_serviced",
-               .private = (unsigned long)&blkcg_policy_bfq,
-               .seq_show = blkg_print_stat_ios,
-       },
-       {
-               .name = "bfq.io_service_time",
-               .private = offsetof(struct bfq_group, stats.service_time),
-               .seq_show = bfqg_print_rwstat,
-       },
-       {
-               .name = "bfq.io_wait_time",
-               .private = offsetof(struct bfq_group, stats.wait_time),
-               .seq_show = bfqg_print_rwstat,
-       },
-       {
-               .name = "bfq.io_merged",
-               .private = offsetof(struct bfq_group, stats.merged),
-               .seq_show = bfqg_print_rwstat,
-       },
-       {
-               .name = "bfq.io_queued",
-               .private = offsetof(struct bfq_group, stats.queued),
-               .seq_show = bfqg_print_rwstat,
-       },
-
-       /* the same statistics which cover the bfqg and its descendants */
-       {
-               .name = "bfq.time_recursive",
-               .private = offsetof(struct bfq_group, stats.time),
-               .seq_show = bfqg_print_stat_recursive,
-       },
-       {
-               .name = "bfq.sectors_recursive",
-               .seq_show = bfqg_print_stat_sectors_recursive,
-       },
-       {
-               .name = "bfq.io_service_bytes_recursive",
-               .private = (unsigned long)&blkcg_policy_bfq,
-               .seq_show = blkg_print_stat_bytes_recursive,
-       },
-       {
-               .name = "bfq.io_serviced_recursive",
-               .private = (unsigned long)&blkcg_policy_bfq,
-               .seq_show = blkg_print_stat_ios_recursive,
-       },
-       {
-               .name = "bfq.io_service_time_recursive",
-               .private = offsetof(struct bfq_group, stats.service_time),
-               .seq_show = bfqg_print_rwstat_recursive,
-       },
-       {
-               .name = "bfq.io_wait_time_recursive",
-               .private = offsetof(struct bfq_group, stats.wait_time),
-               .seq_show = bfqg_print_rwstat_recursive,
-       },
-       {
-               .name = "bfq.io_merged_recursive",
-               .private = offsetof(struct bfq_group, stats.merged),
-               .seq_show = bfqg_print_rwstat_recursive,
-       },
-       {
-               .name = "bfq.io_queued_recursive",
-               .private = offsetof(struct bfq_group, stats.queued),
-               .seq_show = bfqg_print_rwstat_recursive,
-       },
-       {
-               .name = "bfq.avg_queue_size",
-               .seq_show = bfqg_print_avg_queue_size,
-       },
-       {
-               .name = "bfq.group_wait_time",
-               .private = offsetof(struct bfq_group, stats.group_wait_time),
-               .seq_show = bfqg_print_stat,
-       },
-       {
-               .name = "bfq.idle_time",
-               .private = offsetof(struct bfq_group, stats.idle_time),
-               .seq_show = bfqg_print_stat,
-       },
-       {
-               .name = "bfq.empty_time",
-               .private = offsetof(struct bfq_group, stats.empty_time),
-               .seq_show = bfqg_print_stat,
-       },
-       {
-               .name = "bfq.dequeue",
-               .private = offsetof(struct bfq_group, stats.dequeue),
-               .seq_show = bfqg_print_stat,
-       },
-       { }     /* terminate */
-};
-
-static struct cftype bfq_blkg_files[] = {
-       {
-               .name = "bfq.weight",
-               .flags = CFTYPE_NOT_ON_ROOT,
-               .seq_show = bfq_io_show_weight,
-               .write = bfq_io_set_weight,
-       },
-       {} /* terminate */
-};
+       if (ioc) {
+               unsigned long flags;
+               struct bfq_io_cq *icq;
 
-#else  /* CONFIG_BFQ_GROUP_IOSCHED */
-
-static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
-                       struct bfq_queue *bfqq, unsigned int op) { }
-static inline void
-bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
-static inline void
-bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
-static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
-                       uint64_t start_time, uint64_t io_start_time,
-                       unsigned int op) { }
-static inline void
-bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
-                                    struct bfq_group *curr_bfqg) { }
-static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
-static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
-static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
-static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
-static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
-static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
-
-static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-                         struct bfq_group *bfqg) {}
-
-static void bfq_init_entity(struct bfq_entity *entity,
-                           struct bfq_group *bfqg)
-{
-       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+               spin_lock_irqsave(q->queue_lock, flags);
+               icq = icq_to_bic(ioc_lookup_icq(ioc, q));
+               spin_unlock_irqrestore(q->queue_lock, flags);
 
-       entity->weight = entity->new_weight;
-       entity->orig_weight = entity->new_weight;
-       if (bfqq) {
-               bfqq->ioprio = bfqq->new_ioprio;
-               bfqq->ioprio_class = bfqq->new_ioprio_class;
+               return icq;
        }
-       entity->sched_data = &bfqg->sched_data;
-}
-
-static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
-
-static void bfq_end_wr_async(struct bfq_data *bfqd)
-{
-       bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-}
-
-static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
-                                           struct blkcg *blkcg)
-{
-       return bfqd->root_group;
-}
 
-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
-{
-       return bfqq->bfqd->root_group;
+       return NULL;
 }
 
-static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd,
-                                                   int node)
+/*
+ * Scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing.
+ */
+void bfq_schedule_dispatch(struct bfq_data *bfqd)
 {
-       struct bfq_group *bfqg;
-       int i;
-
-       bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
-       if (!bfqg)
-               return NULL;
-
-       for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
-               bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
-
-       return bfqg;
+       if (bfqd->queued != 0) {
+               bfq_log(bfqd, "schedule dispatch");
+               blk_mq_run_hw_queues(bfqd->queue, true);
+       }
 }
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
 
 #define bfq_class_idle(bfqq)   ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define bfq_class_rt(bfqq)     ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -3731,6 +397,222 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
        }
 }
 
+static struct bfq_queue *
+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+                    sector_t sector, struct rb_node **ret_parent,
+                    struct rb_node ***rb_link)
+{
+       struct rb_node **p, *parent;
+       struct bfq_queue *bfqq = NULL;
+
+       parent = NULL;
+       p = &root->rb_node;
+       while (*p) {
+               struct rb_node **n;
+
+               parent = *p;
+               bfqq = rb_entry(parent, struct bfq_queue, pos_node);
+
+               /*
+                * Sort strictly based on sector. Smallest to the left,
+                * largest to the right.
+                */
+               if (sector > blk_rq_pos(bfqq->next_rq))
+                       n = &(*p)->rb_right;
+               else if (sector < blk_rq_pos(bfqq->next_rq))
+                       n = &(*p)->rb_left;
+               else
+                       break;
+               p = n;
+               bfqq = NULL;
+       }
+
+       *ret_parent = parent;
+       if (rb_link)
+               *rb_link = p;
+
+       bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
+               (unsigned long long)sector,
+               bfqq ? bfqq->pid : 0);
+
+       return bfqq;
+}
+
+void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+       struct rb_node **p, *parent;
+       struct bfq_queue *__bfqq;
+
+       if (bfqq->pos_root) {
+               rb_erase(&bfqq->pos_node, bfqq->pos_root);
+               bfqq->pos_root = NULL;
+       }
+
+       if (bfq_class_idle(bfqq))
+               return;
+       if (!bfqq->next_rq)
+               return;
+
+       bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
+       __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
+                       blk_rq_pos(bfqq->next_rq), &parent, &p);
+       if (!__bfqq) {
+               rb_link_node(&bfqq->pos_node, parent, p);
+               rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
+       } else
+               bfqq->pos_root = NULL;
+}
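
The lookup above follows the usual "find the node or the insertion point" pattern: descend by sector, stop on an exact match, otherwise remember the parent and the child link where a new node would go. Below is a rough userspace sketch of that pattern, with hypothetical names and a plain binary tree standing in for the kernel rbtree (an illustration, not part of this patch):

#include <stdio.h>
#include <stddef.h>

struct pos_node {
	unsigned long long sector;
	struct pos_node *left, *right;
};

/*
 * Return the node matching @sector, or NULL; in either case *link ends
 * up pointing at the child pointer where a new node would be attached.
 */
static struct pos_node *pos_lookup(struct pos_node **root,
				   unsigned long long sector,
				   struct pos_node ***link)
{
	struct pos_node **p = root;

	while (*p) {
		if (sector > (*p)->sector)
			p = &(*p)->right;
		else if (sector < (*p)->sector)
			p = &(*p)->left;
		else
			break;
	}
	*link = p;
	return *p;
}

int main(void)
{
	struct pos_node b = { 200, NULL, NULL };
	struct pos_node a = { 100, NULL, &b };	/* 200 sits to the right of 100 */
	struct pos_node *root = &a, **link;

	printf("200 found: %d\n", pos_lookup(&root, 200, &link) != NULL);
	printf("150 found: %d\n", pos_lookup(&root, 150, &link) != NULL);
	return 0;
}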
+
+/*
+ * Tell whether there are active queues or groups with differentiated weights.
+ */
+static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+{
+       /*
+        * For weights to differ, at least one of the trees must contain
+        * at least two nodes.
+        */
+       return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+               (bfqd->queue_weights_tree.rb_node->rb_left ||
+                bfqd->queue_weights_tree.rb_node->rb_right)
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+              ) ||
+              (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+               (bfqd->group_weights_tree.rb_node->rb_left ||
+                bfqd->group_weights_tree.rb_node->rb_right)
+#endif
+              );
+}
+
+/*
+ * The following function returns true if every queue must receive the
+ * same share of the throughput (this condition is used when deciding
+ * whether idling may be disabled, see the comments in the function
+ * bfq_bfqq_may_idle()).
+ *
+ * Such a scenario occurs when:
+ * 1) all active queues have the same weight,
+ * 2) all active groups at the same level in the groups tree have the same
+ *    weight,
+ * 3) all active groups at the same level in the groups tree have the same
+ *    number of children.
+ *
+ * Unfortunately, keeping the necessary state for evaluating exactly the
+ * above symmetry conditions would be quite complex and time-consuming.
+ * Therefore this function evaluates, instead, the following stronger
+ * sub-conditions, for which it is much easier to maintain the needed
+ * state:
+ * 1) all active queues have the same weight,
+ * 2) all active groups have the same weight,
+ * 3) all active groups have at most one active child each.
+ * In particular, the last two conditions are always true if hierarchical
+ * support and the cgroups interface are not enabled, thus no state needs
+ * to be maintained in this case.
+ */
+static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+{
+       return !bfq_differentiated_weights(bfqd);
+}
+
+/*
+ * If the weight-counter tree passed as input contains no counter for
+ * the weight of the input entity, then add that counter; otherwise just
+ * increment the existing counter.
+ *
+ * Note that weight-counter trees contain few nodes in mostly symmetric
+ * scenarios. For example, if all queues have the same weight, then the
+ * weight-counter tree for the queues may contain at most one node.
+ * This holds even if low_latency is on, because weight-raised queues
+ * are not inserted in the tree.
+ * In most scenarios, the rate at which nodes are created/destroyed
+ * should be low too.
+ */
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+                         struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /*
+        * Do not insert if the entity is already associated with a
+        * counter, which happens if:
+        *   1) the entity is associated with a queue,
+        *   2) a request arrival has caused the queue to become both
+        *      non-weight-raised, and hence change its weight, and
+        *      backlogged; in this respect, each of the two events
+        *      causes an invocation of this function,
+        *   3) this is the invocation of this function caused by the
+        *      second event. This second invocation is actually useless,
+        *      and we handle this fact by exiting immediately. More
+        *      efficient or clearer solutions might possibly be adopted.
+        */
+       if (entity->weight_counter)
+               return;
+
+       while (*new) {
+               struct bfq_weight_counter *__counter = container_of(*new,
+                                               struct bfq_weight_counter,
+                                               weights_node);
+               parent = *new;
+
+               if (entity->weight == __counter->weight) {
+                       entity->weight_counter = __counter;
+                       goto inc_counter;
+               }
+               if (entity->weight < __counter->weight)
+                       new = &((*new)->rb_left);
+               else
+                       new = &((*new)->rb_right);
+       }
+
+       entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+                                        GFP_ATOMIC);
+
+       /*
+        * In the unlucky event of an allocation failure, we just
+        * exit. This will cause the weight of entity to not be
+        * considered in bfq_differentiated_weights, which, in its
+        * turn, causes the scenario to be deemed wrongly symmetric in
+        * case entity's weight would have been the only weight making
+        * the scenario asymmetric. On the bright side, no unbalance
+        * the scenario asymmetric. On the bright side, however, no
+        * imbalance will occur when entity becomes inactive again (the
+        * of entity). In fact, bfq_weights_tree_remove does nothing
+        * if !entity->weight_counter.
+        */
+       if (unlikely(!entity->weight_counter))
+               return;
+
+       entity->weight_counter->weight = entity->weight;
+       rb_link_node(&entity->weight_counter->weights_node, parent, new);
+       rb_insert_color(&entity->weight_counter->weights_node, root);
+
+inc_counter:
+       entity->weight_counter->num_active++;
+}
+
+/*
+ * Decrement the weight counter associated with the entity, and, if the
+ * counter reaches 0, remove the counter from the tree.
+ * See the comments to the function bfq_weights_tree_add() for considerations
+ * about overhead.
+ */
+void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
+                            struct rb_root *root)
+{
+       if (!entity->weight_counter)
+               return;
+
+       entity->weight_counter->num_active--;
+       if (entity->weight_counter->num_active > 0)
+               goto reset_entity_pointer;
+
+       rb_erase(&entity->weight_counter->weights_node, root);
+       kfree(entity->weight_counter);
+
+reset_entity_pointer:
+       entity->weight_counter = NULL;
+}
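
The two functions above keep one counter per distinct active weight, incremented on activation and freed when the last entity with that weight goes inactive; bfq_differentiated_weights() then only has to ask whether more than one such counter exists. A rough userspace sketch of the same bookkeeping, with hypothetical names and a fixed array standing in for the rbtree (an illustration, not part of this patch):

#include <stdio.h>

#define MAX_WEIGHTS	16

struct weight_counter {
	int weight;
	int num_active;
};

static struct weight_counter counters[MAX_WEIGHTS];

/* Find or create the counter for @weight and increment it. */
static struct weight_counter *weights_add(int weight)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_WEIGHTS; i++) {
		if (counters[i].num_active && counters[i].weight == weight) {
			counters[i].num_active++;
			return &counters[i];
		}
		if (!counters[i].num_active && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return NULL;		/* "allocation" failure: just give up */
	counters[free_slot].weight = weight;
	counters[free_slot].num_active = 1;
	return &counters[free_slot];
}

/* Decrement the counter; a slot with num_active == 0 is free again. */
static void weights_remove(struct weight_counter *wc)
{
	if (wc && wc->num_active > 0)
		wc->num_active--;
}

/* Weights are differentiated iff more than one distinct weight is active. */
static int differentiated_weights(void)
{
	int i, distinct = 0;

	for (i = 0; i < MAX_WEIGHTS; i++)
		if (counters[i].num_active)
			distinct++;
	return distinct > 1;
}

int main(void)
{
	struct weight_counter *a = weights_add(100), *b = weights_add(100);

	printf("differentiated: %d\n", differentiated_weights());	/* 0 */
	weights_add(200);
	printf("differentiated: %d\n", differentiated_weights());	/* 1 */
	weights_remove(a);
	weights_remove(b);
	return 0;
}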
+
 /*
  * Return expired entry, or NULL to just start from scratch in rbtree.
  */
@@ -3837,6 +719,269 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
        }
 }
 
+static void
+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+{
+       if (bic->saved_idle_window)
+               bfq_mark_bfqq_idle_window(bfqq);
+       else
+               bfq_clear_bfqq_idle_window(bfqq);
+
+       if (bic->saved_IO_bound)
+               bfq_mark_bfqq_IO_bound(bfqq);
+       else
+               bfq_clear_bfqq_IO_bound(bfqq);
+
+       bfqq->ttime = bic->saved_ttime;
+       bfqq->wr_coeff = bic->saved_wr_coeff;
+       bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
+       bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
+       bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
+
+       if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+           time_is_before_jiffies(bfqq->last_wr_start_finish +
+                                  bfqq->wr_cur_max_time))) {
+               bfq_log_bfqq(bfqq->bfqd, bfqq,
+                   "resume state: switching off wr");
+
+               bfqq->wr_coeff = 1;
+       }
+
+       /* make sure weight will be updated, however we got here */
+       bfqq->entity.prio_changed = 1;
+}
+
+static int bfqq_process_refs(struct bfq_queue *bfqq)
+{
+       return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
+}
+
+/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
+static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+       struct bfq_queue *item;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
+               hlist_del_init(&item->burst_list_node);
+       hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+       bfqd->burst_size = 1;
+       bfqd->burst_parent_entity = bfqq->entity.parent;
+}
+
+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+       /* Increment burst size to take into account also bfqq */
+       bfqd->burst_size++;
+
+       if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
+               struct bfq_queue *pos, *bfqq_item;
+               struct hlist_node *n;
+
+               /*
+                * Enough queues have been activated shortly after each
+                * other to consider this burst as large.
+                */
+               bfqd->large_burst = true;
+
+               /*
+                * We can now mark all queues in the burst list as
+                * belonging to a large burst.
+                */
+               hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
+                                    burst_list_node)
+                       bfq_mark_bfqq_in_large_burst(bfqq_item);
+               bfq_mark_bfqq_in_large_burst(bfqq);
+
+               /*
+                * From now on, and until the current burst finishes, any
+                * new queue being activated shortly after the last queue
+                * was inserted in the burst can be immediately marked as
+                * belonging to a large burst. So the burst list is not
+                * needed any more. Remove it.
+                */
+               hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
+                                         burst_list_node)
+                       hlist_del_init(&pos->burst_list_node);
+       } else /*
+               * Burst not yet large: add bfqq to the burst list. Do
+               * not increment the ref counter for bfqq, because bfqq
+               * is removed from the burst list before freeing bfqq
+               * in put_queue.
+               */
+               hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+}
+
+/*
+ * If many queues belonging to the same group happen to be created
+ * shortly after each other, then the processes associated with these
+ * queues have typically a common goal. In particular, bursts of queue
+ * creations are usually caused by services or applications that spawn
+ * many parallel threads/processes. Examples are systemd during boot,
+ * or git grep. To help these processes get their job done as soon as
+ * possible, it is usually better to not grant either weight-raising
+ * or device idling to their queues.
+ *
+ * In this comment we describe, firstly, the reasons why this fact
+ * holds, and, secondly, the next function, which implements the main
+ * steps needed to properly mark these queues so that they can then be
+ * treated in a different way.
+ *
+ * The above services or applications benefit mostly from a high
+ * throughput: the quicker the requests of the activated queues are
+ * cumulatively served, the sooner the target job of these queues gets
+ * completed. As a consequence, weight-raising any of these queues,
+ * which also implies idling the device for it, is almost always
+ * counterproductive. In most cases it just lowers throughput.
+ *
+ * On the other hand, a burst of queue creations may be caused also by
+ * the start of an application that does not consist of a lot of
+ * parallel I/O-bound threads. In fact, with a complex application,
+ * several short processes may need to be executed to start up the
+ * application. In this respect, to start an application as quickly as
+ * possible, the best thing to do is in any case to privilege the I/O
+ * related to the application with respect to all other
+ * I/O. Therefore, the best strategy to start as quickly as possible
+ * an application that causes a burst of queue creations is to
+ * weight-raise all the queues created during the burst. This is the
+ * exact opposite of the best strategy for the other type of bursts.
+ *
+ * In the end, to take the best action for each of the two cases, the
+ * two types of bursts need to be distinguished. Fortunately, this
+ * seems relatively easy, by looking at the sizes of the bursts. In
+ * particular, we found a threshold such that only bursts with a
+ * larger size than that threshold are apparently caused by
+ * services or commands such as systemd or git grep. For brevity,
+ * hereafter we call just 'large' these bursts. BFQ *does not*
+ * weight-raise queues whose creation occurs in a large burst. In
+ * addition, for each of these queues BFQ performs or does not perform
+ * idling depending on which choice boosts the throughput more. The
+ * exact choice depends on the device and request pattern at
+ * hand.
+ *
+ * Unfortunately, false positives may occur while an interactive task
+ * is starting (e.g., an application is being started). The
+ * consequence is that the queues associated with the task do not
+ * enjoy weight raising as expected. Fortunately these false positives
+ * are very rare. They typically occur if some service happens to
+ * start doing I/O exactly when the interactive task starts.
+ *
+ * Turning back to the next function, it implements all the steps
+ * needed to detect the occurrence of a large burst and to properly
+ * mark all the queues belonging to it (so that they can then be
+ * treated in a different way). This goal is achieved by maintaining a
+ * "burst list" that holds, temporarily, the queues that belong to the
+ * burst in progress. The list is then used to mark these queues as
+ * belonging to a large burst if the burst does become large. The main
+ * steps are the following.
+ *
+ * . when the very first queue is created, the queue is inserted into the
+ *   list (as it could be the first queue in a possible burst)
+ *
+ * . if the current burst has not yet become large, and a queue Q that does
+ *   not yet belong to the burst is activated shortly after the last time
+ *   at which a new queue entered the burst list, then the function appends
+ *   Q to the burst list
+ *
+ * . if, as a consequence of the previous step, the burst size reaches
+ *   the large-burst threshold, then
+ *
+ *     . all the queues in the burst list are marked as belonging to a
+ *       large burst
+ *
+ *     . the burst list is deleted; in fact, the burst list already served
+ *       its purpose (temporarily keeping track of the queues in a burst,
+ *       so as to be able to mark them as belonging to a large burst in the
+ *       previous sub-step), and now is not needed any more
+ *
+ *     . the device enters a large-burst mode
+ *
+ * . if a queue Q that does not belong to the burst is created while
+ *   the device is in large-burst mode and shortly after the last time
+ *   at which a queue either entered the burst list or was marked as
+ *   belonging to the current large burst, then Q is immediately marked
+ *   as belonging to a large burst.
+ *
+ * . if a queue Q that does not belong to the burst is created a while
+ *   later than, i.e., not shortly after, the last time at which a queue
+ *   either entered the burst list or was marked as belonging to the
+ *   current large burst, then the current burst is deemed as finished and:
+ *
+ *        . the large-burst mode is reset if set
+ *
+ *        . the burst list is emptied
+ *
+ *        . Q is inserted in the burst list, as Q may be the first queue
+ *          in a possible new burst (then the burst list contains just Q
+ *          after this step).
+ */
+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+       /*
+        * If bfqq is already in the burst list or is part of a large
+        * burst, or finally has just been split, then there is
+        * nothing else to do.
+        */
+       if (!hlist_unhashed(&bfqq->burst_list_node) ||
+           bfq_bfqq_in_large_burst(bfqq) ||
+           time_is_after_eq_jiffies(bfqq->split_time +
+                                    msecs_to_jiffies(10)))
+               return;
+
+       /*
+        * If bfqq's creation happens late enough, or bfqq belongs to
+        * a different group than the burst group, then the current
+        * burst is finished, and related data structures must be
+        * reset.
+        *
+        * In this respect, consider the special case where bfqq is
+        * the very first queue created after BFQ is selected for this
+        * device. In this case, last_ins_in_burst and
+        * burst_parent_entity are not yet significant when we get
+        * here. But it is easy to verify that, whether or not the
+        * following condition is true, bfqq will end up being
+        * inserted into the burst list. In particular the list will
+        * happen to contain only bfqq. And this is exactly what has
+        * to happen, as bfqq may be the first queue of the first
+        * burst.
+        */
+       if (time_is_before_jiffies(bfqd->last_ins_in_burst +
+           bfqd->bfq_burst_interval) ||
+           bfqq->entity.parent != bfqd->burst_parent_entity) {
+               bfqd->large_burst = false;
+               bfq_reset_burst_list(bfqd, bfqq);
+               goto end;
+       }
+
+       /*
+        * If we get here, then bfqq is being activated shortly after the
+        * last queue. So, if the current burst is also large, we can mark
+        * bfqq as belonging to this large burst immediately.
+        */
+       if (bfqd->large_burst) {
+               bfq_mark_bfqq_in_large_burst(bfqq);
+               goto end;
+       }
+
+       /*
+        * If we get here, then a large-burst state has not yet been
+        * reached, but bfqq is being activated shortly after the last
+        * queue. Then we add bfqq to the burst.
+        */
+       bfq_add_to_burst(bfqd, bfqq);
+end:
+       /*
+        * At this point, bfqq either has been added to the current
+        * burst or has caused the current burst to terminate and a
+        * possible new burst to start. In particular, in the second
+        * case, bfqq has become the first queue in the possible new
+        * burst.  In both cases last_ins_in_burst needs to be moved
+        * forward.
+        */
+       bfqd->last_ins_in_burst = jiffies;
+}
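
A rough sketch of the burst bookkeeping described in the long comment above: each new queue creation either closes the current burst and restarts the list, is immediately marked as part of an already-large burst, or grows the current burst, possibly making it large. Hypothetical names, a plain tick counter standing in for jiffies, and thresholds chosen only for illustration (not part of this patch):

#include <stdio.h>
#include <stdbool.h>

#define BURST_INTERVAL	2	/* "shortly after" threshold, in ticks */
#define LARGE_THRESH	3	/* queues needed to deem a burst large  */

static unsigned long last_ins_in_burst;
static int burst_size;
static bool large_burst;

static void handle_queue_creation(unsigned long now)
{
	if (now - last_ins_in_burst > BURST_INTERVAL) {
		/* previous burst finished: restart with this queue only */
		large_burst = false;
		burst_size = 1;
	} else if (large_burst) {
		/* queue is immediately marked as part of the large burst */
	} else if (++burst_size >= LARGE_THRESH) {
		/* burst just became large; the burst list would be emptied */
		large_burst = true;
	}
	last_ins_in_burst = now;
	printf("t=%lu size=%d large=%d\n", now, burst_size, large_burst);
}

int main(void)
{
	unsigned long t[] = { 0, 1, 2, 3, 10 };
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		handle_queue_creation(t[i]);
	return 0;
}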
+
 static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
 {
        struct bfq_entity *entity = &bfqq->entity;
@@ -3869,11 +1014,6 @@ static int bfq_min_budget(struct bfq_data *bfqd)
                return bfqd->bfq_max_budget / 32;
 }
 
-static void bfq_bfqq_expire(struct bfq_data *bfqd,
-                           struct bfq_queue *bfqq,
-                           bool compensate,
-                           enum bfqq_expiration reason);
-
 /*
  * The next function, invoked after the input queue bfqq switches from
  * idle to busy, updates the budget of bfqq. The function also tells
@@ -4050,6 +1190,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
                                             unsigned int old_wr_coeff,
                                             bool wr_or_deserves_wr,
                                             bool interactive,
+                                            bool in_burst,
                                             bool soft_rt)
 {
        if (old_wr_coeff == 1 && wr_or_deserves_wr) {
@@ -4081,7 +1222,9 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
                if (interactive) { /* update wr coeff and duration */
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
-               } else if (soft_rt) {
+               } else if (in_burst)
+                       bfqq->wr_coeff = 1;
+               else if (soft_rt) {
                        /*
                         * The application is now or still meeting the
                         * requirements for being deemed soft rt.  We
@@ -4141,7 +1284,8 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
                                             struct request *rq,
                                             bool *interactive)
 {
-       bool soft_rt, wr_or_deserves_wr, bfqq_wants_to_preempt,
+       bool soft_rt, in_burst, wr_or_deserves_wr,
+               bfqq_wants_to_preempt,
                idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
                /*
                 * See the comments on
@@ -4157,14 +1301,19 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
        /*
         * bfqq deserves to be weight-raised if:
         * - it is sync,
-        * - it has been idle for enough time or is soft real-time.
+        * - it does not belong to a large burst,
+        * - it has been idle for enough time or is soft real-time,
+        * - it is linked to a bfq_io_cq (it is not shared in any sense).
         */
+       in_burst = bfq_bfqq_in_large_burst(bfqq);
        soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+               !in_burst &&
                time_is_before_jiffies(bfqq->soft_rt_next_start);
-       *interactive = idle_for_long_time;
+       *interactive = !in_burst && idle_for_long_time;
        wr_or_deserves_wr = bfqd->low_latency &&
                (bfqq->wr_coeff > 1 ||
-                (bfq_bfqq_sync(bfqq) && (*interactive || soft_rt)));
+                (bfq_bfqq_sync(bfqq) &&
+                 bfqq->bic && (*interactive || soft_rt)));
 
        /*
         * Using the last flag, update budget and check whether bfqq
@@ -4175,6 +1324,31 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
                                                    arrived_in_time,
                                                    wr_or_deserves_wr);
 
+       /*
+        * If bfqq happened to be activated in a burst, but has been
+        * idle for much longer than an interactive queue, then we
+        * assume that, in the overall I/O initiated in the burst, the
+        * I/O associated with bfqq is finished. So bfqq does not need
+        * to be treated as a queue belonging to a burst
+        * anymore. Accordingly, we reset bfqq's in_large_burst flag
+        * if set, and remove bfqq from the burst list if it's
+        * there. We do not decrement burst_size, because the fact
+        * that bfqq does not need to belong to the burst list any
+        * more does not invalidate the fact that bfqq was created in
+        * a burst.
+        */
+       if (likely(!bfq_bfqq_just_created(bfqq)) &&
+           idle_for_long_time &&
+           time_is_before_jiffies(
+                   bfqq->budget_timeout +
+                   msecs_to_jiffies(10000))) {
+               hlist_del_init(&bfqq->burst_list_node);
+               bfq_clear_bfqq_in_large_burst(bfqq);
+       }
+
+       bfq_clear_bfqq_just_created(bfqq);
+
+
        if (!bfq_bfqq_IO_bound(bfqq)) {
                if (arrived_in_time) {
                        bfqq->requests_within_timer++;
@@ -4186,14 +1360,23 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
        }
 
        if (bfqd->low_latency) {
-               bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
-                                                old_wr_coeff,
-                                                wr_or_deserves_wr,
-                                                *interactive,
-                                                soft_rt);
-
-               if (old_wr_coeff != bfqq->wr_coeff)
-                       bfqq->entity.prio_changed = 1;
+               if (unlikely(time_is_after_jiffies(bfqq->split_time)))
+                       /* wraparound */
+                       bfqq->split_time =
+                               jiffies - bfqd->bfq_wr_min_idle_time - 1;
+
+               if (time_is_before_jiffies(bfqq->split_time +
+                                          bfqd->bfq_wr_min_idle_time)) {
+                       bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
+                                                        old_wr_coeff,
+                                                        wr_or_deserves_wr,
+                                                        *interactive,
+                                                        in_burst,
+                                                        soft_rt);
+
+                       if (old_wr_coeff != bfqq->wr_coeff)
+                               bfqq->entity.prio_changed = 1;
+               }
        }
 
        bfqq->last_idle_bklogged = jiffies;
@@ -4240,6 +1423,12 @@ static void bfq_add_request(struct request *rq)
        next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
        bfqq->next_rq = next_rq;
 
+       /*
+        * Adjust priority tree position, if next_rq changes.
+        */
+       if (prev != bfqq->next_rq)
+               bfq_pos_tree_add_move(bfqd, bfqq);
+
        if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
                bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
                                                 rq, &interactive);
@@ -4368,6 +1557,14 @@ static void bfq_remove_request(struct request_queue *q,
                         */
                        bfqq->entity.budget = bfqq->entity.service = 0;
                }
+
+               /*
+                * Remove queue from request-position tree as it is empty.
+                */
+               if (bfqq->pos_root) {
+                       rb_erase(&bfqq->pos_node, bfqq->pos_root);
+                       bfqq->pos_root = NULL;
+               }
        }
 
        if (rq->cmd_flags & REQ_META)
@@ -4445,11 +1642,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
                                         bfqd->last_position);
                bfqq->next_rq = next_rq;
                /*
-                * If next_rq changes, update the queue's budget to fit
-                * the new request.
+                * If next_rq changes, update both the queue's budget to
+                * fit the new request and the queue's position in its
+                * rq_pos_tree.
                 */
-               if (prev != bfqq->next_rq)
+               if (prev != bfqq->next_rq) {
                        bfq_updated_next_req(bfqd, bfqq);
+                       bfq_pos_tree_add_move(bfqd, bfqq);
+               }
        }
 }
 
@@ -4504,8 +1704,8 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
        bfqq->entity.prio_changed = 1;
 }
 
-static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
-                                   struct bfq_group *bfqg)
+void bfq_end_wr_async_queues(struct bfq_data *bfqd,
+                            struct bfq_group *bfqg)
 {
        int i, j;
 
@@ -4532,12 +1732,349 @@ static void bfq_end_wr(struct bfq_data *bfqd)
        spin_unlock_irq(&bfqd->lock);
 }
 
+static sector_t bfq_io_struct_pos(void *io_struct, bool request)
+{
+       if (request)
+               return blk_rq_pos(io_struct);
+       else
+               return ((struct bio *)io_struct)->bi_iter.bi_sector;
+}
+
+static int bfq_rq_close_to_sector(void *io_struct, bool request,
+                                 sector_t sector)
+{
+       return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
+              BFQQ_CLOSE_THR;
+}
+
+static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
+                                        struct bfq_queue *bfqq,
+                                        sector_t sector)
+{
+       struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
+       struct rb_node *parent, *node;
+       struct bfq_queue *__bfqq;
+
+       if (RB_EMPTY_ROOT(root))
+               return NULL;
+
+       /*
+        * First, if we find a request starting at the end of the last
+        * request, choose it.
+        */
+       __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
+       if (__bfqq)
+               return __bfqq;
+
+       /*
+        * If the exact sector wasn't found, the parent of the NULL leaf
+        * will contain the closest sector (rq_pos_tree sorted by
+        * next_request position).
+        */
+       __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
+       if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+               return __bfqq;
+
+       if (blk_rq_pos(__bfqq->next_rq) < sector)
+               node = rb_next(&__bfqq->pos_node);
+       else
+               node = rb_prev(&__bfqq->pos_node);
+       if (!node)
+               return NULL;
+
+       __bfqq = rb_entry(node, struct bfq_queue, pos_node);
+       if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+               return __bfqq;
+
+       return NULL;
+}
+
+static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
+                                                  struct bfq_queue *cur_bfqq,
+                                                  sector_t sector)
+{
+       struct bfq_queue *bfqq;
+
+       /*
+        * We shall notice if some of the queues are cooperating,
+        * e.g., working closely on the same area of the device. In
+        * that case, we can group them together and: 1) don't waste
+        * time idling, and 2) serve the union of their requests in
+        * the best possible order for throughput.
+        */
+       bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
+       if (!bfqq || bfqq == cur_bfqq)
+               return NULL;
+
+       return bfqq;
+}
+
+static struct bfq_queue *
+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+{
+       int process_refs, new_process_refs;
+       struct bfq_queue *__bfqq;
+
+       /*
+        * If there are no process references on the new_bfqq, then it is
+        * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
+        * may have dropped their last reference (not just their last process
+        * reference).
+        */
+       if (!bfqq_process_refs(new_bfqq))
+               return NULL;
+
+       /* Avoid a circular list and skip interim queue merges. */
+       while ((__bfqq = new_bfqq->new_bfqq)) {
+               if (__bfqq == bfqq)
+                       return NULL;
+               new_bfqq = __bfqq;
+       }
+
+       process_refs = bfqq_process_refs(bfqq);
+       new_process_refs = bfqq_process_refs(new_bfqq);
+       /*
+        * If the process for the bfqq has gone away, there is no
+        * sense in merging the queues.
+        */
+       if (process_refs == 0 || new_process_refs == 0)
+               return NULL;
+
+       bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
+               new_bfqq->pid);
+
+       /*
+        * Merging is just a redirection: the requests of the process
+        * owning one of the two queues are redirected to the other queue.
+        * The latter queue, in its turn, is set as shared if this is the
+        * first time that the requests of some process are redirected to
+        * it.
+        *
+        * We redirect bfqq to new_bfqq and not the opposite, because
+        * we are in the context of the process owning bfqq, thus we
+        * have the io_cq of this process. So we can immediately
+        * configure this io_cq to redirect the requests of the
+        * process to new_bfqq. In contrast, the io_cq of new_bfqq is
+        * not available any more (new_bfqq->bic == NULL).
+        *
+        * Anyway, even in case new_bfqq coincides with the in-service
+        * queue, redirecting requests to the in-service queue is the
+        * best option, as we feed the in-service queue with new
+        * requests close to the last request served and, by doing so,
+        * are likely to increase the throughput.
+        */
+       bfqq->new_bfqq = new_bfqq;
+       new_bfqq->ref += process_refs;
+       return new_bfqq;
+}
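
The loop at the top of the function collapses redirection chains: it follows ->new_bfqq to the final queue and refuses circular chains. A rough sketch of that walk, with hypothetical names and plain structs standing in for bfq_queue (not part of this patch):

#include <stdio.h>
#include <stddef.h>

struct queue {
	const char *name;
	struct queue *new_queue;	/* queue this one was merged into */
};

/* Return the last queue in the chain, or NULL if the chain loops back. */
static struct queue *chain_tail(struct queue *q, struct queue *start)
{
	struct queue *next;

	while ((next = q->new_queue)) {
		if (next == start)
			return NULL;	/* circular chain: refuse to merge */
		q = next;
	}
	return q;
}

int main(void)
{
	struct queue c = { "C", NULL };
	struct queue b = { "B", &c };
	struct queue a = { "A", NULL };
	struct queue *tail = chain_tail(&b, &a);

	printf("merge target: %s\n", tail ? tail->name : "(none)");
	return 0;
}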
+
+static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+                                       struct bfq_queue *new_bfqq)
+{
+       if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
+           (bfqq->ioprio_class != new_bfqq->ioprio_class))
+               return false;
+
+       /*
+        * If either of the queues has already been detected as seeky,
+        * then merging it with the other queue is unlikely to lead to
+        * sequential I/O.
+        */
+       if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
+               return false;
+
+       /*
+        * Interleaved I/O is known to be done by (some) applications
+        * only for reads, so it does not make sense to merge async
+        * queues.
+        */
+       if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
+               return false;
+
+       return true;
+}
+
+/*
+ * If this function returns true, then bfqq cannot be merged. The idea
+ * is that true cooperation happens very early after processes start
+ * to do I/O. Usually, late cooperations are just accidental false
+ * positives. In case bfqq is weight-raised, such false positives
+ * would evidently degrade latency guarantees for bfqq.
+ */
+static bool wr_from_too_long(struct bfq_queue *bfqq)
+{
+       return bfqq->wr_coeff > 1 &&
+               time_is_before_jiffies(bfqq->last_wr_start_finish +
+                                      msecs_to_jiffies(100));
+}
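
A rough sketch of the time-window test above: weight raising counts as "too old" for merging once more than 100 ms have elapsed since the last weight-raising start/finish timestamp. A plain millisecond counter stands in for jiffies, and the names are hypothetical (not part of this patch):

#include <stdio.h>
#include <stdbool.h>

/* wr_coeff > 1 means the queue is currently weight-raised. */
static bool wr_from_too_long(unsigned long now_ms,
			     unsigned long last_wr_start_finish_ms,
			     unsigned int wr_coeff)
{
	return wr_coeff > 1 && now_ms - last_wr_start_finish_ms > 100;
}

int main(void)
{
	printf("%d\n", wr_from_too_long(150, 100, 30));	/* 0: only 50 ms  */
	printf("%d\n", wr_from_too_long(250, 100, 30));	/* 1: 150 ms      */
	printf("%d\n", wr_from_too_long(250, 100, 1));	/* 0: not raised  */
	return 0;
}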
+
+/*
+ * Attempt to schedule a merge of bfqq with the currently in-service
+ * queue or with a close queue among the scheduled queues.  Return
+ * NULL if no merge was scheduled, a pointer to the shared bfq_queue
+ * structure otherwise.
+ *
+ * The OOM queue is not allowed to participate in cooperation: in fact, since
+ * the requests temporarily redirected to the OOM queue could be redirected
+ * again to dedicated queues at any time, the state needed to correctly
+ * handle merging with the OOM queue would be quite complex and expensive
+ * to maintain. Besides, in such a critical condition as an out of memory,
+ * the benefits of queue merging may be little relevant, or even negligible.
+ *
+ * Weight-raised queues can be merged only if their weight-raising
+ * period has just started. In fact cooperating processes are usually
+ * started together. Thus, with this filter we avoid false positives
+ * that would jeopardize low-latency guarantees.
+ *
+ * WARNING: queue merging may impair fairness among non-weight raised
+ * queues, for at least two reasons: 1) the original weight of a
+ * merged queue may change during the merged state, 2) even if the
+ * weight stays the same, a merged queue may be bloated with many more
+ * requests than the ones produced by its originally-associated
+ * process.
+ */
+static struct bfq_queue *
+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+                    void *io_struct, bool request)
+{
+       struct bfq_queue *in_service_bfqq, *new_bfqq;
+
+       if (bfqq->new_bfqq)
+               return bfqq->new_bfqq;
+
+       if (!io_struct ||
+           wr_from_too_long(bfqq) ||
+           unlikely(bfqq == &bfqd->oom_bfqq))
+               return NULL;
+
+       /* If there is only one backlogged queue, don't search. */
+       if (bfqd->busy_queues == 1)
+               return NULL;
+
+       in_service_bfqq = bfqd->in_service_queue;
+
+       if (!in_service_bfqq || in_service_bfqq == bfqq
+           || wr_from_too_long(in_service_bfqq) ||
+           unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+               goto check_scheduled;
+
+       if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+           bfqq->entity.parent == in_service_bfqq->entity.parent &&
+           bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
+               new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
+               if (new_bfqq)
+                       return new_bfqq;
+       }
+       /*
+        * Check whether there is a cooperator among currently scheduled
+        * queues. The only thing we need is that the bio/request is not
+        * NULL, as we need it to establish whether a cooperator exists.
+        */
+check_scheduled:
+       new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
+                       bfq_io_struct_pos(io_struct, request));
+
+       if (new_bfqq && !wr_from_too_long(new_bfqq) &&
+           likely(new_bfqq != &bfqd->oom_bfqq) &&
+           bfq_may_be_close_cooperator(bfqq, new_bfqq))
+               return bfq_setup_merge(bfqq, new_bfqq);
+
+       return NULL;
+}
+
+static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+{
+       struct bfq_io_cq *bic = bfqq->bic;
+
+       /*
+        * If !bfqq->bic, the queue is already shared or its requests
+        * have already been redirected to a shared queue; both idle window
+        * and weight raising state have already been saved. Do nothing.
+        */
+       if (!bic)
+               return;
+
+       bic->saved_ttime = bfqq->ttime;
+       bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
+       bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+       bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+       bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+       bic->saved_wr_coeff = bfqq->wr_coeff;
+       bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
+       bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+       bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
+}
+
+static void
+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+               struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+{
+       bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+               (unsigned long)new_bfqq->pid);
+       /* Save weight raising and idle window of the merged queues */
+       bfq_bfqq_save_state(bfqq);
+       bfq_bfqq_save_state(new_bfqq);
+       if (bfq_bfqq_IO_bound(bfqq))
+               bfq_mark_bfqq_IO_bound(new_bfqq);
+       bfq_clear_bfqq_IO_bound(bfqq);
+
+       /*
+        * If bfqq is weight-raised, then let new_bfqq inherit
+        * weight-raising. To reduce false positives, neglect the case
+        * where bfqq has just been created, but has not yet made it
+        * to be weight-raised (which may happen because EQM may merge
+        * bfqq even before bfq_add_request is executed for the first
+        * time for bfqq). Handling this case would however be very
+        * easy, thanks to the flag just_created.
+        */
+       if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
+               new_bfqq->wr_coeff = bfqq->wr_coeff;
+               new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
+               new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
+               new_bfqq->wr_start_at_switch_to_srt =
+                       bfqq->wr_start_at_switch_to_srt;
+               if (bfq_bfqq_busy(new_bfqq))
+                       bfqd->wr_busy_queues++;
+               new_bfqq->entity.prio_changed = 1;
+       }
+
+       if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
+               bfqq->wr_coeff = 1;
+               bfqq->entity.prio_changed = 1;
+               if (bfq_bfqq_busy(bfqq))
+                       bfqd->wr_busy_queues--;
+       }
+
+       bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
+                    bfqd->wr_busy_queues);
+
+       /*
+        * Merge queues (that is, let bic redirect its requests to new_bfqq)
+        */
+       bic_set_bfqq(bic, new_bfqq, 1);
+       bfq_mark_bfqq_coop(new_bfqq);
+       /*
+        * new_bfqq now belongs to at least two bics (it is a shared queue):
+        * set new_bfqq->bic to NULL. bfqq either:
+        * - does not belong to any bic any more, and hence bfqq->bic must
+        *   be set to NULL, or
+        * - is a queue whose owning bics have already been redirected to a
+        *   different queue, hence the queue is destined to not belong to
+        *   any bic soon and bfqq->bic is already NULL (therefore the next
+        *   assignment causes no harm).
+        */
+       new_bfqq->bic = NULL;
+       bfqq->bic = NULL;
+       /* release process reference to bfqq */
+       bfq_put_queue(bfqq);
+}
+
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
                                struct bio *bio)
 {
        struct bfq_data *bfqd = q->elevator->elevator_data;
        bool is_sync = op_is_sync(bio->bi_opf);
-       struct bfq_queue *bfqq = bfqd->bio_bfqq;
+       struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
 
        /*
         * Disallow merge of a sync bio into an async request.
@@ -4552,6 +2089,37 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
        if (!bfqq)
                return false;
 
+       /*
+        * We take advantage of this function to perform an early merge
+        * of the queues of possible cooperating processes.
+        */
+       new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
+       if (new_bfqq) {
+               /*
+                * bic still points to bfqq, which means it has not yet
+                * been redirected to some other bfq_queue, and a queue
+                * merge between bfqq and new_bfqq can be safely
+                * fulfilled, i.e., bic can be redirected to new_bfqq
+                * and bfqq can be put.
+                */
+               bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
+                               new_bfqq);
+               /*
+                * If we get here, bio will be queued into new_bfqq,
+                * so use new_bfqq to decide whether bio and rq can be
+                * merged.
+                */
+               bfqq = new_bfqq;
+
+               /*
+                * Change also bfqd->bio_bfqq, as
+                * bfqd->bio_bic now points to new_bfqq, and
+                * this function may be invoked again (and then may
+                * use bfqd->bio_bfqq again).
+                */
+               bfqd->bio_bfqq = bfqq;
+       }
+
        return bfqq == RQ_BFQQ(rq);
 }
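
The net effect of the early merge above is that the bic is redirected to the shared queue before the merge decision is taken, so the final comparison is made against the queue the bio will actually be queued into. The following userspace sketch, with invented names, captures that redirect-then-compare pattern.

    #include <stdbool.h>
    #include <stddef.h>

    struct queue { int id; };

    /* Per-process context: the queue its sync I/O is currently directed to. */
    struct io_ctx {
            struct queue *sync_queue;
    };

    /* Conceptual counterpart of redirecting a bic to a shared queue. */
    static void redirect(struct io_ctx *ic, struct queue *shared)
    {
            ic->sync_queue = shared;
    }

    /*
     * Allow a bio to merge into an existing request only if, after any
     * early queue merge, both end up in the same queue.
     */
    static bool allow_merge(struct io_ctx *bio_ctx, struct queue *rq_queue,
                            struct queue *merge_target)
    {
            if (merge_target)
                    redirect(bio_ctx, merge_target);

            return bio_ctx->sync_queue == rq_queue;
    }

    int main(void)
    {
            struct queue q1 = { 1 }, q2 = { 2 };
            struct io_ctx ic = { .sync_queue = &q1 };

            bool without_merge = allow_merge(&ic, &q2, NULL);   /* false */
            bool with_merge = allow_merge(&ic, &q2, &q2);       /* true  */

            return (!without_merge && with_merge) ? 0 : 1;
    }
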
 
@@ -4645,14 +2213,8 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
 {
        struct bfq_queue *bfqq = bfqd->in_service_queue;
-       struct bfq_io_cq *bic;
        u32 sl;
 
-       /* Processes have exited, don't wait. */
-       bic = bfqd->in_service_bic;
-       if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
-               return;
-
        bfq_mark_bfqq_wait_request(bfqq);
 
        /*
@@ -4662,13 +2224,17 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
         */
        sl = bfqd->bfq_slice_idle;
        /*
-        * Unless the queue is being weight-raised, grant only minimum
-        * idle time if the queue is seeky. A long idling is preserved
-        * for a weight-raised queue, because it is needed for
-        * guaranteeing to the queue its reserved share of the
-        * throughput.
+        * Unless the queue is being weight-raised or the scenario is
+        * asymmetric, grant only minimum idle time if the queue
+        * is seeky. A long idle period is preserved for a weight-raised
+        * queue, or, more generally, in an asymmetric scenario,
+        * because a long idle period is needed to guarantee a queue
+        * its reserved share of the throughput (in particular, it is
+        * needed if the queue has a higher weight than some other
+        * queue).
         */
-       if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1)
+       if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
+           bfq_symmetric_scenario(bfqd))
                sl = min_t(u64, sl, BFQ_MIN_TT);
 
        bfqd->last_idling_start = ktime_get();
@@ -4959,6 +2525,15 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 
 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
+       /*
+        * If this bfqq is shared between multiple processes, check
+        * to make sure that those processes are still issuing I/Os
+        * within the mean seek distance. If not, it may be time to
+        * break the queues apart again.
+        */
+       if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
+               bfq_mark_bfqq_split_coop(bfqq);
+
        if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
                if (bfqq->dispatched == 0)
                        /*
@@ -4970,8 +2545,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
                        bfqq->budget_timeout = jiffies;
 
                bfq_del_bfqq_busy(bfqd, bfqq, true);
-       } else
+       } else {
                bfq_requeue_bfqq(bfqd, bfqq);
+               /*
+                * Resort priority tree of potential close cooperators.
+                */
+               bfq_pos_tree_add_move(bfqd, bfqq);
+       }
 
        /*
         * All in-service entities must have been properly deactivated
@@ -5344,10 +2924,10 @@ static unsigned long bfq_smallest_from_now(void)
  * former on a timeslice basis, without violating service domain
  * guarantees among the latter.
  */
-static void bfq_bfqq_expire(struct bfq_data *bfqd,
-                           struct bfq_queue *bfqq,
-                           bool compensate,
-                           enum bfqq_expiration reason)
+void bfq_bfqq_expire(struct bfq_data *bfqd,
+                    struct bfq_queue *bfqq,
+                    bool compensate,
+                    enum bfqq_expiration reason)
 {
        bool slow;
        unsigned long delta = 0;
@@ -5518,6 +3098,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
 {
        struct bfq_data *bfqd = bfqq->bfqd;
        bool idling_boosts_thr, idling_boosts_thr_without_issues,
+               idling_needed_for_service_guarantees,
                asymmetric_scenario;
 
        if (bfqd->strict_guarantees)
@@ -5527,18 +3108,24 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
         * The next variable takes into account the cases where idling
         * boosts the throughput.
         *
-        * The value of the variable is computed considering that
-        * idling is usually beneficial for the throughput if:
+        * The value of the variable is computed considering, first, that
+        * idling is virtually always beneficial for the throughput if:
         * (a) the device is not NCQ-capable, or
-        * (b) regardless of the presence of NCQ, the request pattern
-        *     for bfqq is I/O-bound (possible throughput losses
-        *     caused by granting idling to seeky queues are mitigated
-        *     by the fact that, in all scenarios where boosting
-        *     throughput is the best thing to do, i.e., in all
-        *     symmetric scenarios, only a minimal idle time is
-        *     allowed to seeky queues).
+        * (b) regardless of the presence of NCQ, the device is rotational
+        *     and the request pattern for bfqq is I/O-bound and sequential.
+        *
+        * Secondly, and in contrast to the above item (b), idling an
+        * NCQ-capable flash-based device would not boost the
+        * throughput even with sequential I/O; rather it would lower
+        * the throughput in proportion to how fast the device
+        * is. Accordingly, the next variable is true if either of the
+        * above conditions (a) or (b) holds, and, in particular,
+        * happens to be false if the device is NCQ-capable and
+        * flash-based.
         */
-       idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
+       idling_boosts_thr = !bfqd->hw_tag ||
+               (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
+                bfq_bfqq_idle_window(bfqq));
 
        /*
         * The value of the next variable,
@@ -5579,14 +3166,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
                bfqd->wr_busy_queues == 0;
 
        /*
-        * There is then a case where idling must be performed not for
-        * throughput concerns, but to preserve service guarantees. To
-        * introduce it, we can note that allowing the drive to
-        * enqueue more than one request at a time, and hence
+        * There is then a case where idling must be performed not
+        * for throughput concerns, but to preserve service
+        * guarantees.
+        *
+        * To introduce this case, we can note that allowing the drive
+        * to enqueue more than one request at a time, and hence
         * delegating de facto final scheduling decisions to the
-        * drive's internal scheduler, causes loss of control on the
+        * drive's internal scheduler, entails loss of control on the
         * actual request service order. In particular, the critical
-        * situation is when requests from different processes happens
+        * situation is when requests from different processes happen
         * to be present, at the same time, in the internal queue(s)
         * of the drive. In such a situation, the drive, by deciding
         * the service order of the internally-queued requests, does
@@ -5597,51 +3186,114 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
         * the service distribution enforced by the drive's internal
         * scheduler is likely to coincide with the desired
         * device-throughput distribution only in a completely
-        * symmetric scenario where: (i) each of these processes must
-        * get the same throughput as the others; (ii) all these
-        * processes have the same I/O pattern (either sequential or
-        * random).  In fact, in such a scenario, the drive will tend
-        * to treat the requests of each of these processes in about
-        * the same way as the requests of the others, and thus to
-        * provide each of these processes with about the same
-        * throughput (which is exactly the desired throughput
-        * distribution). In contrast, in any asymmetric scenario,
-        * device idling is certainly needed to guarantee that bfqq
-        * receives its assigned fraction of the device throughput
-        * (see [1] for details).
+        * symmetric scenario where:
+        * (i)  each of these processes must get the same throughput as
+        *      the others;
+        * (ii) all these processes have the same I/O pattern
+        *      (either sequential or random).
+        * In fact, in such a scenario, the drive will tend to treat
+        * the requests of each of these processes in about the same
+        * way as the requests of the others, and thus to provide
+        * each of these processes with about the same throughput
+        * (which is exactly the desired throughput distribution). In
+        * contrast, in any asymmetric scenario, device idling is
+        * certainly needed to guarantee that bfqq receives its
+        * assigned fraction of the device throughput (see [1] for
+        * details).
+        *
+        * We address this issue by controlling, actually, only the
+        * symmetry sub-condition (i), i.e., provided that
+        * sub-condition (i) holds, idling is not performed,
+        * regardless of whether sub-condition (ii) holds. In other
+        * words, only if sub-condition (i) does not hold is idling
+        * allowed, and the device tends to be prevented from queueing
+        * many requests, possibly of several processes. The reason
+        * for not controlling also sub-condition (ii) is that we
+        * exploit preemption to preserve guarantees in case of
+        * symmetric scenarios, even if (ii) does not hold, as
+        * explained in the next two paragraphs.
         *
-        * As for sub-condition (i), actually we check only whether
-        * bfqq is being weight-raised. In fact, if bfqq is not being
-        * weight-raised, we have that:
-        * - if the process associated with bfqq is not I/O-bound, then
-        *   it is not either latency- or throughput-critical; therefore
-        *   idling is not needed for bfqq;
-        * - if the process asociated with bfqq is I/O-bound, then
-        *   idling is already granted with bfqq (see the comments on
-        *   idling_boosts_thr).
+        * Even if a queue, say Q, is expired when it remains idle, Q
+        * can still preempt the new in-service queue if the next
+        * request of Q arrives soon (see the comments on
+        * bfq_bfqq_update_budg_for_activation). If all queues and
+        * groups have the same weight, this form of preemption,
+        * combined with the hole-recovery heuristic described in the
+        * comments on function bfq_bfqq_update_budg_for_activation,
+        * is enough to preserve a correct bandwidth distribution in
+        * the mid term, even without idling. In fact, even if not
+        * idling allows the internal queues of the device to contain
+        * many requests, and thus to reorder them, we can rather
+        * safely assume that the internal scheduler still preserves a
+        * minimum of mid-term fairness. The motivation for using
+        * preemption instead of idling is that, by not idling,
+        * service guarantees are preserved without sacrificing any
+        * throughput. In other words, both a high
+        * throughput and its desired distribution are obtained.
         *
-        * We do not check sub-condition (ii) at all, i.e., the next
-        * variable is true if and only if bfqq is being
-        * weight-raised. We do not need to control sub-condition (ii)
-        * for the following reason:
-        * - if bfqq is being weight-raised, then idling is already
-        *   guaranteed to bfqq by sub-condition (i);
-        * - if bfqq is not being weight-raised, then idling is
-        *   already guaranteed to bfqq (only) if it matters, i.e., if
-        *   bfqq is associated to a currently I/O-bound process (see
-        *   the above comment on sub-condition (i)).
+        * More precisely, this preemption-based, idleless approach
+        * provides fairness in terms of IOPS, and not sectors per
+        * second. This can be seen with a simple example. Suppose
+        * that there are two queues with the same weight, but that
+        * the first queue receives requests of 8 sectors, while the
+        * second queue receives requests of 1024 sectors. In
+        * addition, suppose that each of the two queues contains at
+        * most one request at a time, which implies that each queue
+        * always remains idle after it is served. Finally, after
+        * remaining idle, each queue receives very quickly a new
+        * request. It follows that the two queues are served
+        * alternatively, preempting each other if needed. This
+        * implies that, although both queues have the same weight,
+        * the queue with large requests receives a service that is
+        * 1024/8 times as high as the service received by the other
+        * queue.
+        *
+        * On the other hand, device idling is performed, and thus
+        * pure sector-domain guarantees are provided, for the
+        * following queues, which are likely to need stronger
+        * throughput guarantees: weight-raised queues, and queues
+        * with a higher weight than other queues. When such queues
+        * are active, sub-condition (i) is false, which triggers
+        * device idling.
+        *
+        * According to the above considerations, the next variable is
+        * true (only) if sub-condition (i) does not hold. To compute
+        * the value of this variable, we not only use the return value
+        * of the function bfq_symmetric_scenario(), but also check
+        * whether bfqq is being weight-raised, because
+        * bfq_symmetric_scenario() does not take weight-raised queues
+        * into account (see comments on
+        * bfq_weights_tree_add()).
         *
         * As a side note, it is worth considering that the above
         * device-idling countermeasures may however fail in the
         * following unlucky scenario: if idling is (correctly)
-        * disabled in a time period during which the symmetry
-        * sub-condition holds, and hence the device is allowed to
+        * disabled in a time period during which all symmetry
+        * sub-conditions hold, and hence the device is allowed to
         * enqueue many requests, but at some later point in time some
+        * sub-condition ceases to hold, then it may become impossible
         * to let requests be served in the desired order until all
         * the requests already queued in the device have been served.
         */
-       asymmetric_scenario = bfqq->wr_coeff > 1;
+       asymmetric_scenario = bfqq->wr_coeff > 1 ||
+               !bfq_symmetric_scenario(bfqd);
+
+       /*
+        * Finally, there is a case where maximizing throughput is the
+        * best choice even if it may cause unfairness toward
+        * bfqq. Such a case is when bfqq became active in a burst of
+        * queue activations. Queues that became active during a large
+        * burst benefit only from throughput, as discussed in the
+        * comments on bfq_handle_burst. Thus, if bfqq became active
+        * in a burst and not idling the device maximizes throughput,
+        * then the device must not be idled, because not idling the
+        * device provides bfqq and all other queues in the burst with
+        * maximum benefit. Combining this and the above case, we can
+        * now establish when idling is actually needed to preserve
+        * service guarantees.
+        */
+       idling_needed_for_service_guarantees =
+               asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
 
        /*
         * We have now all the components we need to compute the return
@@ -5652,7 +3304,8 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
         *    is necessary to preserve service guarantees.
         */
        return bfq_bfqq_sync(bfqq) &&
-               (idling_boosts_thr_without_issues || asymmetric_scenario);
+               (idling_boosts_thr_without_issues ||
+                idling_needed_for_service_guarantees);
 }
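
The IOPS-versus-sectors argument in the comment above can be checked with a tiny userspace simulation (not taken from this file): with one outstanding request per queue and strictly alternating, preemption-based service, both queues complete the same number of requests, but the queue issuing 1024-sector requests receives 128 times the sector-domain service of the queue issuing 8-sector requests.

    #include <stdio.h>

    /*
     * Two queues with equal weight, each keeping at most one request
     * pending, served strictly alternately (the preemption-based,
     * idleless mode described above). Queue 0 issues 8-sector requests,
     * queue 1 issues 1024-sector requests.
     */
    int main(void)
    {
            const unsigned long req_sectors[2] = { 8, 1024 };
            unsigned long served[2] = { 0, 0 };
            int rounds = 1000;

            for (int i = 0; i < rounds; i++) {
                    served[0] += req_sectors[0];    /* one request from queue 0 */
                    served[1] += req_sectors[1];    /* one request from queue 1 */
            }

            /* Same IOPS for both queues... */
            printf("requests: %d vs %d\n", rounds, rounds);
            /* ...but a 1024/8 = 128x gap in sector-domain service. */
            printf("sectors:  %lu vs %lu (ratio %lu)\n",
                   served[0], served[1], served[1] / served[0]);
            return 0;
    }
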
 
 /*
@@ -5791,15 +3444,17 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
                        bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
 
                /*
-                * If too much time has elapsed from the beginning of
-                * this weight-raising period, then end weight
-                * raising.
+                * If the queue was activated in a burst, or too much
+                * time has elapsed from the beginning of this
+                * weight-raising period, then end weight raising.
                 */
-               if (time_is_before_jiffies(bfqq->last_wr_start_finish +
-                                          bfqq->wr_cur_max_time)) {
+               if (bfq_bfqq_in_large_burst(bfqq))
+                       bfq_bfqq_end_wr(bfqq);
+               else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
+                                               bfqq->wr_cur_max_time)) {
                        if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
                        time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
-                                                  bfq_wr_duration(bfqd)))
+                                              bfq_wr_duration(bfqd)))
                                bfq_bfqq_end_wr(bfqq);
                        else {
                                /* switch back to interactive wr */
@@ -5846,11 +3501,6 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
         */
        bfq_update_wr_data(bfqd, bfqq);
 
-       if (!bfqd->in_service_bic) {
-               atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
-               bfqd->in_service_bic = RQ_BIC(rq);
-       }
-
        /*
         * Expire bfqq, pretending that its budget expired, if bfqq
         * belongs to CLASS_IDLE and other queues are waiting for
@@ -5969,6 +3619,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
        struct request *rq;
 
        spin_lock_irq(&bfqd->lock);
+
        rq = __bfq_dispatch_request(hctx);
        spin_unlock_irq(&bfqd->lock);
 
@@ -5982,7 +3633,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  * Scheduler lock must be held here. Recall not to use bfqq after calling
  * this function on it.
  */
-static void bfq_put_queue(struct bfq_queue *bfqq)
+void bfq_put_queue(struct bfq_queue *bfqq)
 {
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
        struct bfq_group *bfqg = bfqq_group(bfqq);
@@ -5996,7 +3647,16 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
        if (bfqq->ref)
                return;
 
-       bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+       if (bfq_bfqq_sync(bfqq))
+               /*
+                * The fact that this queue is being destroyed does not
+                * invalidate the fact that this queue may have been
+                * activated during the current burst. As a consequence,
+                * although the queue no longer exists, and hence
+                * needs to be removed from the burst list if present,
+                * the burst size must not be decremented.
+                */
+               hlist_del_init(&bfqq->burst_list_node);
 
        kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -6004,6 +3664,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
 #endif
 }
 
+static void bfq_put_cooperator(struct bfq_queue *bfqq)
+{
+       struct bfq_queue *__bfqq, *next;
+
+       /*
+        * If this queue was scheduled to merge with another queue, be
+        * sure to drop the reference taken on that queue (and others in
+        * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
+        */
+       __bfqq = bfqq->new_bfqq;
+       while (__bfqq) {
+               if (__bfqq == bfqq)
+                       break;
+               next = __bfqq->new_bfqq;
+               bfq_put_queue(__bfqq);
+               __bfqq = next;
+       }
+}
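
bfq_setup_merge() (earlier in this file) takes a reference on every queue it links into a merge chain through new_bfqq; bfq_put_cooperator() walks that chain and drops exactly one reference per link, stopping if the chain loops back to the starting queue. A self-contained userspace sketch of that walk, with invented structure names and refcounts, is shown below.

    #include <stdio.h>

    /* Minimal stand-in for a queue in a merge chain; one reference is
     * assumed to have been taken on each queue reachable via next. */
    struct mq {
            int ref;
            struct mq *next;        /* plays the role of new_bfqq */
    };

    static void put(struct mq *q)
    {
            if (--q->ref == 0)
                    printf("queue %p freed\n", (void *)q);
    }

    /* Drop the reference held on every queue further down the chain,
     * guarding against a cycle back to the starting queue. */
    static void put_cooperators(struct mq *q)
    {
            struct mq *cur = q->next, *next;

            while (cur) {
                    if (cur == q)
                            break;
                    next = cur->next;
                    put(cur);
                    cur = next;
            }
    }

    int main(void)
    {
            struct mq a = { .ref = 2 }, b = { .ref = 2 }, c = { .ref = 2 };

            a.next = &b;            /* a was scheduled to merge into b ... */
            b.next = &c;            /* ... and b into c */

            put_cooperators(&a);    /* drops one ref on b and one on c */
            printf("refs: b=%d c=%d\n", b.ref, c.ref);
            return 0;
    }
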
+
 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        if (bfqq == bfqd->in_service_queue) {
@@ -6013,6 +3692,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
        bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
 
+       bfq_put_cooperator(bfqq);
+
        bfq_put_queue(bfqq); /* release process reference */
 }
 
@@ -6030,7 +3711,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
                spin_lock_irqsave(&bfqd->lock, flags);
                bfq_exit_bfqq(bfqd, bfqq);
                bic_set_bfqq(bic, NULL, is_sync);
-               spin_unlock_irq(&bfqd->lock);
+               spin_unlock_irqrestore(&bfqd->lock, flags);
        }
 }
 
@@ -6093,6 +3774,10 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
        bfqq->entity.prio_changed = 1;
 }
 
+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+                                      struct bio *bio, bool is_sync,
+                                      struct bfq_io_cq *bic);
+
 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 {
        struct bfq_data *bfqd = bic_to_bfqd(bic);
@@ -6126,6 +3811,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
        RB_CLEAR_NODE(&bfqq->entity.rb_node);
        INIT_LIST_HEAD(&bfqq->fifo);
+       INIT_HLIST_NODE(&bfqq->burst_list_node);
 
        bfqq->ref = 0;
        bfqq->bfqd = bfqd;
@@ -6137,6 +3823,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                if (!bfq_class_idle(bfqq))
                        bfq_mark_bfqq_idle_window(bfqq);
                bfq_mark_bfqq_sync(bfqq);
+               bfq_mark_bfqq_just_created(bfqq);
        } else
                bfq_clear_bfqq_sync(bfqq);
 
@@ -6152,8 +3839,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
        bfqq->budget_timeout = bfq_smallest_from_now();
 
        bfqq->wr_coeff = 1;
-       bfqq->last_wr_start_finish = bfq_smallest_from_now();
+       bfqq->last_wr_start_finish = jiffies;
        bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
+       bfqq->split_time = bfq_smallest_from_now();
 
        /*
         * Set to the value for which bfqq will not be deemed as
@@ -6288,6 +3976,11 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
        if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
                return;
 
+       /* Idle window just restored, statistics are meaningless. */
+       if (time_is_after_eq_jiffies(bfqq->split_time +
+                                    bfqd->bfq_wr_min_idle_time))
+               return;
+
        enable_idle = bfq_bfqq_idle_window(bfqq);
 
        if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
@@ -6383,7 +4076,39 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
 static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 {
-       struct bfq_queue *bfqq = RQ_BFQQ(rq);
+       struct bfq_queue *bfqq = RQ_BFQQ(rq),
+               *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
+
+       if (new_bfqq) {
+               if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
+                       new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
+               /*
+                * Release the request's reference to the old bfqq
+                * and make sure one is taken to the shared queue.
+                */
+               new_bfqq->allocated++;
+               bfqq->allocated--;
+               new_bfqq->ref++;
+               bfq_clear_bfqq_just_created(bfqq);
+               /*
+                * If the bic associated with the process
+                * issuing this request still points to bfqq
+                * (and thus has not been already redirected
+                * to new_bfqq or even some other bfq_queue),
+                * then complete the merge and redirect it to
+                * new_bfqq.
+                */
+               if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+                       bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+                                       bfqq, new_bfqq);
+               /*
+                * rq is about to be enqueued into new_bfqq,
+                * release rq reference on bfqq
+                */
+               bfq_put_queue(bfqq);
+               rq->elv.priv[1] = new_bfqq;
+               bfqq = new_bfqq;
+       }
 
        bfq_add_request(rq);
 
@@ -6483,6 +4208,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
                 * mechanism).
                 */
                bfqq->budget_timeout = jiffies;
+
+               bfq_weights_tree_remove(bfqd, &bfqq->entity,
+                                       &bfqd->queue_weights_tree);
        }
 
        now_ns = ktime_get_ns();
@@ -6599,6 +4327,65 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
        rq->elv.priv[1] = NULL;
 }
 
+/*
+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
+ * was the last process referring to that bfqq.
+ */
+static struct bfq_queue *
+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+{
+       bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
+
+       if (bfqq_process_refs(bfqq) == 1) {
+               bfqq->pid = current->pid;
+               bfq_clear_bfqq_coop(bfqq);
+               bfq_clear_bfqq_split_coop(bfqq);
+               return bfqq;
+       }
+
+       bic_set_bfqq(bic, NULL, 1);
+
+       bfq_put_cooperator(bfqq);
+
+       bfq_put_queue(bfqq);
+       return NULL;
+}
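
The decision above boils down to a refcount check: the last process still using the shared queue simply keeps it (after the coop flags are cleared), while any earlier process detaches and receives NULL, so the caller allocates a fresh queue. A minimal userspace sketch of that decision, with an invented refcount field, follows.

    #include <stdio.h>

    /* Simplified stand-in for a shared queue. */
    struct squeue {
            int process_refs;
    };

    /*
     * Last user keeps and reuses the queue; earlier users detach and get
     * NULL back, prompting the caller to allocate a new queue.
     */
    static struct squeue *split(struct squeue *q)
    {
            if (q->process_refs == 1)
                    return q;
            q->process_refs--;
            return NULL;
    }

    int main(void)
    {
            struct squeue shared = { .process_refs = 2 };

            printf("first split:  %s\n", split(&shared) ? "reuse" : "new queue");
            printf("second split: %s\n", split(&shared) ? "reuse" : "new queue");
            return 0;
    }
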
+
+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+                                                  struct bfq_io_cq *bic,
+                                                  struct bio *bio,
+                                                  bool split, bool is_sync,
+                                                  bool *new_queue)
+{
+       struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
+
+       if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
+               return bfqq;
+
+       if (new_queue)
+               *new_queue = true;
+
+       if (bfqq)
+               bfq_put_queue(bfqq);
+       bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
+
+       bic_set_bfqq(bic, bfqq, is_sync);
+       if (split && is_sync) {
+               if ((bic->was_in_burst_list && bfqd->large_burst) ||
+                   bic->saved_in_large_burst)
+                       bfq_mark_bfqq_in_large_burst(bfqq);
+               else {
+                       bfq_clear_bfqq_in_large_burst(bfqq);
+                       if (bic->was_in_burst_list)
+                               hlist_add_head(&bfqq->burst_list_node,
+                                              &bfqd->burst_list);
+               }
+               bfqq->split_time = jiffies;
+       }
+
+       return bfqq;
+}
+
 /*
  * Allocate bfq data structures associated with this request.
  */
@@ -6609,6 +4396,8 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
        struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
        const int is_sync = rq_is_sync(rq);
        struct bfq_queue *bfqq;
+       bool new_queue = false;
+       bool split = false;
 
        spin_lock_irq(&bfqd->lock);
 
@@ -6619,12 +4408,26 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 
        bfq_bic_update_cgroup(bic, bio);
 
-       bfqq = bic_to_bfqq(bic, is_sync);
-       if (!bfqq || bfqq == &bfqd->oom_bfqq) {
-               if (bfqq)
-                       bfq_put_queue(bfqq);
-               bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
-               bic_set_bfqq(bic, bfqq, is_sync);
+       bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+                                        &new_queue);
+
+       if (likely(!new_queue)) {
+               /* If the queue was seeky for too long, break it apart. */
+               if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+                       bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+
+                       /* Update bic before losing reference to bfqq */
+                       if (bfq_bfqq_in_large_burst(bfqq))
+                               bic->saved_in_large_burst = true;
+
+                       bfqq = bfq_split_bfqq(bic, bfqq);
+                       split = true;
+
+                       if (!bfqq)
+                               bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+                                                                true, is_sync,
+                                                                NULL);
+               }
        }
 
        bfqq->allocated++;
@@ -6635,6 +4438,27 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
        rq->elv.priv[0] = bic;
        rq->elv.priv[1] = bfqq;
 
+       /*
+        * If a bfq_queue has only one process reference, it is owned
+        * by only this bic: we can then set bfqq->bic = bic. In
+        * addition, if the queue has also just been split, we have to
+        * resume its state.
+        */
+       if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+               bfqq->bic = bic;
+               if (split) {
+                       /*
+                        * The queue has just been split from a shared
+                        * queue: restore the idle window and the
+                        * possible weight raising period.
+                        */
+                       bfq_bfqq_resume_state(bfqq, bic);
+               }
+       }
+
+       if (unlikely(bfq_bfqq_just_created(bfqq)))
+               bfq_handle_burst(bfqd, bfqq);
+
        spin_unlock_irq(&bfqd->lock);
 
        return 0;
@@ -6730,7 +4554,7 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
  * we reparent them to the root cgroup (i.e., the only one that will
  * exist for sure until all the requests on a device are gone).
  */
-static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
+void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 {
        int i, j;
 
@@ -6777,6 +4601,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
        root_group->my_entity = NULL;
        root_group->bfqd = bfqd;
 #endif
+       root_group->rq_pos_tree = RB_ROOT;
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
        root_group->sched_data.bfq_class_idle_last_service = jiffies;
@@ -6813,6 +4638,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
        bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
        bfqd->oom_bfqq.entity.new_weight =
                bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
+
+       /* oom_bfqq does not participate in bursts */
+       bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
+
        /*
         * Trigger weight initialization, according to ioprio, at the
         * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
@@ -6828,8 +4657,12 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
                     HRTIMER_MODE_REL);
        bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
 
+       bfqd->queue_weights_tree = RB_ROOT;
+       bfqd->group_weights_tree = RB_ROOT;
+
        INIT_LIST_HEAD(&bfqd->active_list);
        INIT_LIST_HEAD(&bfqd->idle_list);
+       INIT_HLIST_HEAD(&bfqd->burst_list);
 
        bfqd->hw_tag = -1;
 
@@ -6844,6 +4677,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 
        bfqd->bfq_requests_within_timer = 120;
 
+       bfqd->bfq_large_burst_thresh = 8;
+       bfqd->bfq_burst_interval = msecs_to_jiffies(180);
+
        bfqd->low_latency = true;
 
        /*
@@ -7134,24 +4970,6 @@ static struct elevator_type iosched_bfq_mq = {
        .elevator_owner =       THIS_MODULE,
 };
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-static struct blkcg_policy blkcg_policy_bfq = {
-       .dfl_cftypes            = bfq_blkg_files,
-       .legacy_cftypes         = bfq_blkcg_legacy_files,
-
-       .cpd_alloc_fn           = bfq_cpd_alloc,
-       .cpd_init_fn            = bfq_cpd_init,
-       .cpd_bind_fn            = bfq_cpd_init,
-       .cpd_free_fn            = bfq_cpd_free,
-
-       .pd_alloc_fn            = bfq_pd_alloc,
-       .pd_init_fn             = bfq_pd_init,
-       .pd_offline_fn          = bfq_pd_offline,
-       .pd_free_fn             = bfq_pd_free,
-       .pd_reset_stats_fn      = bfq_pd_reset_stats,
-};
-#endif
-
 static int __init bfq_init(void)
 {
        int ret;