/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
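/*
 * Worked example (added for clarity): with mbps == 20 the budget is
 * mb_per_tick(20) = (1048576 / 50) * 20 = 419420 bytes per tick, and the
 * bandwidth timer refills it every TIMER_INTERVAL (20 ms at 50 ticks/sec),
 * giving roughly 20 MB/s overall.
 */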
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct __call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
	blk_status_t error;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};
/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit
 * The highest 2 bits of bitmap are for special purpose. LOCK means the cache
 * page is being flushed to storage. FREE means the cache page is freed and
 * should be skipped from flushing to storage. Please see
 * null_make_cache_space
 */
struct nullb_page {
	struct page *page;
	unsigned long bitmap;
};
#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
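/*
 * Illustrative layout, assuming PAGE_SIZE == 4096 and a 512-byte blocksize:
 * the page holds 8 sectors tracked by bits 0-7 of @bitmap (bit i set means
 * sector i of the page holds valid data), while the two highest bits of the
 * word are reserved for NULLB_PAGE_LOCK and NULLB_PAGE_FREE.
 */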
struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
};
struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};
static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, S_IRUGO);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}
static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	unsigned long tmp;
	int result;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);
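/*
 * Illustrative configfs usage, assuming configfs is mounted at
 * /sys/kernel/config and a device item named "mydev":
 *
 *   mkdir /sys/kernel/config/nullb/mydev
 *   echo 1 > /sys/kernel/config/nullb/mydev/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/mydev/power    (creates /dev/nullb<index>)
 *   echo 0 > /sys/kernel/config/nullb/mydev/power    (tears the disk down)
 */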
static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
					end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
					end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}

CONFIGFS_ATTR(nullb_device_, badblocks);
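/*
 * Illustrative examples of the accepted badblocks syntax ("+start-end" adds
 * a bad range, "-start-end" clears it), assuming a device item named "mydev":
 *
 *   echo "+100-200" > /sys/kernel/config/nullb/mydev/badblocks
 *   echo "-100-200" > /sys/kernel/config/nullb/mydev/badblocks
 */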
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_use_lightnvm,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	NULL,
};
static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};
static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}
static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	badblocks_exit(&dev->badblocks);
	kfree(dev);
}
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}
static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	t_page->bitmap = 0;
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}
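/*
 * Note on the flags above: null_free_page() only marks a locked cache page
 * with NULLB_PAGE_FREE and defers the actual free; null_flush_cache_page()
 * completes the free once the flush that holds NULLB_PAGE_LOCK is done.
 */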
static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, &t_page->bitmap);

		if (!t_page->bitmap) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}
static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}
static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}
static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}
static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && t_page->bitmap == 0) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, &c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, &t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * nullb_flush_cache_page could unlock before using the c_pages. To
	 * avoid race, we don't allow page free
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found the page which is being flushed to disk by other
		 * threads
		 */
		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}
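/*
 * In short: cache pages are scanned in FREE_BATCH-sized batches starting at
 * cache_flush_pos, locked, written back into the data tree and dropped,
 * until there is room for @n more bytes within the configured cache_size.
 */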
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}
static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}
static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}
static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}
static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}
static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			}
			/* requeue request */
			return BLK_STS_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
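/*
 * Illustrative mapping, assuming nr_cpu_ids == 8 and nr_queues == 4: each
 * queue serves DIV_ROUND_UP(8, 4) == 2 CPUs, so CPUs 0-1 map to queue 0,
 * CPUs 2-3 to queue 1, and so on.
 */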
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
};
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}
#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;
, nullb
->dev
->blocksize
); /* convert size to pages */
1485 size
>>= 8; /* concert size to pgs pr blk */
1493 grp
->num_lun
= size
+ 1;
1494 sector_div(blksize
, grp
->num_lun
);
1495 grp
->num_blk
= blksize
;
1498 grp
->fpg_sz
= nullb
->dev
->blocksize
;
1499 grp
->csecs
= nullb
->dev
->blocksize
;
1504 grp
->tbet
= 1500000;
1505 grp
->tbem
= 1500000;
1506 grp
->mpos
= 0x010101; /* single plane rwe */
1507 grp
->cpar
= nullb
->dev
->hw_queue_depth
;
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */
static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	if (dev->use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!dev->use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}
static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
}
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(nullb->dev->submit_queues *
		sizeof(struct nullb_queue), GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}
static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
	if (dev->use_lightnvm && dev->blocksize != 4096)
		dev->blocksize = 4096;

	if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
		dev->queue_mode = NULL_Q_MQ;

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}
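/*
 * Illustrative consequence of the checks above: a device configured with
 * queue_mode=0 (bio) gets mbps forced to 0, because the bio path has no
 * request queue that the throttling code could stop and restart.
 */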
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
		blk_queue_flush_queueable(nullb->q, true);
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (dev->use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	/* check for nullb_page.bitmap */
	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
		return -EINVAL;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_use_lightnvm && g_bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		g_bs = 4096;
	}

	if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		g_queue_mode = NULL_Q_MQ;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	if (g_use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}
static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);
2057 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
2058 MODULE_LICENSE("GPL");