/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
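/*
 * One queue_sysfs_entry describes each file under /sys/block/<dev>/queue:
 * the attribute itself plus the show/store handlers that the
 * queue_attr_show()/queue_attr_store() dispatchers below invoke.
 */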
static ssize_t
queue_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
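/*
 * Note that simple_strtoul() stops at the first non-digit, so trailing
 * characters (such as the newline "echo" appends) are silently ignored
 * and the full write length is still returned.
 */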
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret = queue_var_store(&nr, page, count);

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
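/*
 * Illustrative usage from userspace (device name is an example):
 *
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 *
 * shrinks or grows the request pool and immediately re-evaluates the
 * congestion and queue-full state for both the sync and async lists.
 */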
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
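/*
 * read_ahead_kb is stored internally in pages.  A page is
 * 1 << PAGE_CACHE_SHIFT bytes and 1KB is 1 << 10 bytes, so shifting by
 * (PAGE_CACHE_SHIFT - 10) converts between the two: with 4KB pages
 * (PAGE_CACHE_SHIFT == 12), 32 pages << 2 == 128KB, and 128KB >> 2 == 32
 * pages.
 */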
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	blk_queue_max_sectors(q, max_sectors_kb << 1);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
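/*
 * A sector is 512 bytes, so ">> 1" converts a sector count to KB and
 * "<< 1" converts KB back to sectors.  Writes are rejected with -EINVAL
 * unless they fall between one page and the hardware limit.
 */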
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}
static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
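/*
 * The sysfs file is named "rotational" while the internal flag is
 * QUEUE_FLAG_NONROT, hence the inversion in both handlers: writing 0
 * (non-rotational, e.g. an SSD) sets the flag, writing 1 clears it.
 */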
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nomerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set != 0, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
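/*
 * rq_affinity asks the block layer to complete a request on the CPU
 * that submitted it, which can help cache locality on SMP systems.
 * Without the generic SMP helpers there is no cross-CPU completion to
 * steer, so the store handler rejects writes with -EINVAL.
 */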
static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
}
static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_iostats_show,
	.store = queue_iostats_store,
};
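/*
 * Viewed from userspace these become files under the block device's
 * queue directory, for example (device name illustrative):
 *
 *	$ cat /sys/block/sda/queue/rotational
 *	1
 *
 * Entries without a .store handler, such as max_hw_sectors_kb, are
 * declared read-only (S_IRUGO) and cannot be written.
 */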
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	NULL,
};
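/*
 * The sysfs core walks default_attrs until it hits NULL, so the
 * terminating entry above is required.
 */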
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
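/*
 * Both dispatchers take q->sysfs_lock and then re-check QUEUE_FLAG_DEAD,
 * so an attribute access that races with queue teardown either completes
 * before the queue dies or fails cleanly with -ENOENT.
 */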
/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}
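/*
 * This is the kobject ->release() hook: it is not called directly but
 * runs via kobject_put() once the last reference to q->kobj is dropped,
 * which is why the queue memory itself is freed here.
 */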
static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
int blk_register_queue(struct gendisk *disk)
{
	int ret;

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
			  "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		return ret;
	}

	return 0;
}
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn) {
		elv_unregister_queue(q);

		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		kobject_put(&disk_to_dev(disk)->kobj);
	}
}
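/*
 * Note that blk_unregister_queue() drops the reference on the disk's
 * device kobject that blk_register_queue() took via kobject_get().
 */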