// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/init.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
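
/*
 * Worked example (editor's note, not in the original source): with
 * TICKS_PER_SEC = 50 the bandwidth timer fires every 20ms, so mbps = 100
 * gives (1 << 20) / 50 * 100 = 2097100 bytes of budget per tick, which
 * adds up to roughly 100 MiB over the 50 ticks of one second.
 */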

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
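
/*
 * Example mapping (editor's sketch, assuming 512-byte sectors and a 4K page):
 * a page covers PAGE_SECTORS = 8 sectors, so sector 13 lives in the page at
 * radix-tree index 13 >> PAGE_SECTORS_SHIFT = 1 and occupies bit
 * 13 & SECTOR_MASK = 5 of that page's bitmap. With MAP_SZ = 10, bits 9 and 8
 * are the LOCK and FREE flags described above.
 */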

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	unsigned long tmp;
	int result;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);
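
/*
 * For reference (editor's note): NULLB_DEVICE_ATTR(size, ulong) expands to
 * nullb_device_size_show()/nullb_device_size_store() wrappers around the
 * ulong helpers above plus CONFIGFS_ATTR(nullb_device_, size), i.e. a
 * configfs attribute that refuses writes once NULLB_DEV_FL_CONFIGURED is set.
 */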

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
					const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	/* ... */

	ret = nullb_device_bool_attr_store(&newp, page, count);
	/* ... */

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			/* ... */
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		/* ... */
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			/* ... */
			null_del_dev(dev->nullb);
			/* ... */
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);
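
/*
 * Typical configfs flow (editor's sketch; paths assume configfs is mounted
 * at /sys/kernel/config and the "nullb" subsystem registered below):
 *
 *   mkdir /sys/kernel/config/nullb/nullb1
 *   echo 4096 > /sys/kernel/config/nullb/nullb1/blocksize
 *   echo 1 > /sys/kernel/config/nullb/nullb1/power    # calls null_add_dev()
 *   echo 0 > /sys/kernel/config/nullb/nullb1/power    # calls null_del_dev()
 */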

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
					    const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	ssize_t ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	/* ... */
	buf = strstrip(orig);

	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	/* ... */
	ret = kstrtoull(buf + 1, 0, &start);
	/* ... */
	ret = kstrtoull(tmp + 1, 0, &end);
	/* ... */
	if (buf[0] == '+') {
		/* enable badblocks */
		cmpxchg(&t_dev->badblocks.shift, -1, 0);
		ret = badblocks_set(&t_dev->badblocks, start,
			/* ... */
	} else {
		ret = badblocks_clear(&t_dev->badblocks, start,
			/* ... */
	}
	/* ... */
}

CONFIGFS_ATTR(nullb_device_, badblocks);
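
/*
 * Editor's example of the accepted format, derived from the parsing above:
 * a write must start with '+' (set) or '-' (clear) followed by a
 * "start-end" sector range, e.g.
 *
 *   echo "+0-7" > /sys/kernel/config/nullb/nullb1/badblocks   # mark sectors bad
 *   echo "-0-7" > /sys/kernel/config/nullb/nullb1/badblocks   # clear them again
 */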

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	/* ... */
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		/* ... */
		null_del_dev(dev->nullb);
		/* ... */
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	/* ... */
	badblocks_exit(&dev->badblocks);
	/* ... */
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	/* ... */
	cmd = &nq->cmds[tag];
	/* ... */
	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	/* ... */
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	/* ... */
	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;
	/* ... */
	prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
	cmd = __alloc_cmd(nq);
	/* ... */
	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		/* ... */
	}
	/* ... */
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	/* ... */
	t_page->page = alloc_pages(gfp_flags, 0);
	/* ... */
	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
	/* ... */
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	/* ... */
	__clear_bit(sector_bit, t_page->bitmap);

	if (null_page_empty(t_page)) {
		ret = radix_tree_delete_item(root, idx, t_page);
		WARN_ON(ret != t_page);
		/* ... */
		if (is_cache)
			nullb->dev->curr_cache -= PAGE_SIZE;
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages, i;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			/* ... */
		}
		/* ... */
	} while (nr_pages == FREE_BATCH);
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}
	/* ... */
	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}
	/* ... */
	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	/* ... */
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * nullb_flush_cache_page could unlock before using the c_pages. To
	 * avoid race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found the page which is being flushed to disk by other
		 * threads.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		/* ... */
	}
	flushed += one_round << PAGE_SHIFT;
	/* ... */
	nullb->cache_flush_pos = 0;
	if (one_round == 0) {
		/* give other threads a chance */
		spin_unlock_irq(&nullb->lock);
		spin_lock_irq(&nullb->lock);
	}
	/* ... */
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		/* ... */
		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
		} else {
			src = kmap_atomic(t_page->page);
			memcpy(dst + off + count, src + offset, temp);
			kunmap_atomic(src);
		}
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
				    sector_t nr_sectors, enum req_opf op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (op == REQ_OP_FLUSH) {
		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
		goto out;
	}

	if (nullb->dev->badblocks.shift != -1) {
		/* ... */
		if (badblocks_check(&nullb->dev->badblocks, sector, nr_sectors,
				&first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO)
			err = null_handle_bio(cmd);
		else
			err = null_handle_rq(cmd);

		cmd->error = errno_to_blk_status(err);
	}

	if (!cmd->error && dev->zoned) {
		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
		else if (op == REQ_OP_ZONE_RESET_ALL)
			null_zone_reset(cmd, 0);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
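
/*
 * Editor's note on the throttling scheme implied above: cur_bytes is refilled
 * to mb_per_tick(mbps) every TIMER_INTERVAL (20ms). null_handle_cmd()
 * subtracts each request's byte count from that budget and stops the hardware
 * queues once it goes negative; nullb_bwtimer_fn() restarts them on the next
 * tick.
 */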

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
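
/*
 * Editor's example of the mapping above: with nr_cpu_ids = 8 and
 * nr_queues = 2 the divisor is (8 + 2 - 1) / 2 = 4, so CPUs 0-3 are served
 * by queues[0] and CPUs 4-7 by queues[1].
 */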

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	sector_t nr_sectors = bio_sectors(bio);
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	/* ... */
	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
	return BLK_QC_T_NONE;
}

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	/* ... */
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;
	sector_t nr_sectors = blk_rq_sectors(bd->rq);
	sector_t sector = blk_rq_pos(bd->rq);

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	/* ... */
	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;

		blk_mq_requeue_request(bd->rq, true);
		return BLK_STS_OK;
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	/* ... */
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	/* ... */
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
	.report_zones	= null_zone_report,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		/* ... */
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	/* ... */
	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	/* ... */
	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		/* ... */
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	/* ... */
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		/* ... */
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret)
			return ret;
	}

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	/* ... */
	if (!setup_fault_attr(attr, str))
		return false;
	/* ... */
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	/* ... */
	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	/* ... */
	return rv;
}

static int __init null_init(void)
{
	int ret = 0, i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("null_blk: invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	/* ... */
	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		/* ... */
		goto err_dev;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		/* ... */
		ret = null_add_dev(dev);
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		/* ... */
		null_del_dev(nullb);
	}
	unregister_blkdev(null_major, "nullb");

	configfs_unregister_subsystem(&nullb_subsys);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");