#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};
struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};
struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};
/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};
static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
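/*
 * Tag management: each nullb_queue tracks free command slots in a small
 * bitmap, using test_and_set_bit_lock()/clear_bit_unlock() for exclusion.
 */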
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
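/*
 * Commands live in the queue's preallocated cmds[] array and are indexed
 * by tag; alloc_cmd() can optionally sleep until a tag becomes free.
 */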
static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_io(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}
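/*
 * Timer-based completion: commands are queued on a per-CPU llist and
 * completed when the hrtimer for that CPU fires.
 */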
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			end_cmd(cmd);
			entry = entry->next;
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}
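/* Softirq completion: recover the command from the request PDU and end it. */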
static void null_softirq_done_fn(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}
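/* Map the submitting CPU to one of the device's submission queues. */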
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}
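/*
 * Per-node hardware context allocation: spread the hardware queues across
 * the online NUMA nodes and allocate each hctx on its node.
 */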
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
					     unsigned int hctx_index)
{
	int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes);
	int tip = (set->nr_hw_queues % nr_online_nodes);
	int node = 0, i, n;

	/*
	 * Split submit queues evenly with respect to the number of nodes. If
	 * uneven, fill the first buckets with one extra, until the rest is
	 * filled with no extra.
	 */
	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)
				b_size = set->nr_hw_queues / nr_online_nodes;
		}
	}

	/*
	 * A node might not be online, therefore map the relative node id to
	 * the real node id.
	 */
	for_each_online_node(n) {
		if (!node)
			break;
		node--;
	}

	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
{
	kfree(hctx);
}
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}
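/* blk-mq operations; the pernode variant uses the NUMA-aware hctx allocator. */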
static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};
static struct blk_mq_ops null_mq_ops_pernode = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
	.alloc_hctx	= null_alloc_hctx,
	.free_hctx	= null_free_hctx,
};
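/* Tear down a single nullb device: remove its gendisk and release the queue. */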
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}
static void null_release(struct gendisk *disk, fmode_t mode)
{
}
static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};
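/* Allocate the per-queue command array and the tag bitmap that tracks free slots. */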
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}
static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			goto err_queue;
		nullb->nr_queues++;
	}

	return 0;

err_queue:
	cleanup_queues(nullb);
	return ret;
}
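/*
 * Create one nullb device: set up the request queue for the selected
 * queue_mode, then allocate and register the gendisk.
 */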
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		goto out;

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	if (setup_queues(nullb))
		goto out_free_nullb;
	if (queue_mode == NULL_Q_MQ) {
		if (use_per_node_hctx)
			nullb->tag_set.ops = &null_mq_ops_pernode;
		else
			nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		if (blk_mq_alloc_tag_set(&nullb->tag_set))
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (!nullb->q)
			goto out_cleanup_tags;
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q)
			goto out_cleanup_queues;
		blk_queue_make_request(nullb->q, null_queue_bio);
		init_driver_queues(nullb);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q)
			goto out_cleanup_queues;
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		init_driver_queues(nullb);
	}
	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return -ENOMEM;
}
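/*
 * Module init: validate parameters, set up the per-CPU completion queues
 * and register nr_devices nullb instances.
 */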
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);
	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}
static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}
module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");