// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
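
/*
 * Worked example (illustrative only): with TICKS_PER_SEC = 50 the bandwidth
 * timer fires every 20ms, and mb_per_tick() computes the byte budget per
 * tick with integer division first:
 *
 *   mb_per_tick(100) = (1 << 20) / 50 * 100 = 20971 * 100 = 2097100 bytes,
 *
 * i.e. ~100 MB/s over 50 ticks. Because the division happens before the
 * multiply, the budget slightly undershoots mbps << 20 per second.
 */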
/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};
#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of bitmap are for special purpose. LOCK means the cache
 * page is being flushed to storage. FREE means the cache page is freed and
 * should be skipped from flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
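
/*
 * Layout sketch (illustrative, assuming 4KB pages and 512-byte sectors):
 * MAP_SZ = (4096 >> 9) + 2 = 10 bits. Bits 0..7 track the eight sectors in
 * the page, bit 8 (NULLB_PAGE_FREE) marks a freed cache page, and bit 9
 * (NULLB_PAGE_LOCK) marks a cache page currently being flushed to the
 * backing store.
 */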
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;
static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}
static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);
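
/*
 * Expansion sketch (illustrative): NULLB_DEVICE_ATTR(blocksize, uint)
 * generates nullb_device_blocksize_show()/nullb_device_blocksize_store()
 * wrappers around the uint helpers above, plus a configfs attribute named
 * "blocksize". The store side refuses changes (-EBUSY) once
 * NULLB_DEV_FL_CONFIGURED is set, so attributes are only writable before
 * the device is powered on.
 */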
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (dev->power) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);
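
/*
 * Usage sketch (illustrative; assumes configfs mounted at the standard
 * /sys/kernel/config):
 *
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *   echo 1 > /sys/kernel/config/nullb/nullb0/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power    # brings up /dev/nullb<i>
 *   echo 0 > /sys/kernel/config/nullb/nullb0/power    # tears it down
 */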
static ssize_t nullb_device_badblocks_show(struct config_item *item,
	char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);
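
/*
 * Badblocks syntax sketch (illustrative): ranges are written as
 * "+<start>-<end>" to mark sectors bad and "-<start>-<end>" to clear them,
 * e.g.
 *
 *   echo "+0-7" > /sys/kernel/config/nullb/nullb0/badblocks
 *
 * I/O touching a marked range then completes with BLK_STS_IOERR.
 */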
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};
static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};
static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}
static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}
static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}
static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}
static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}
static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found the page which is being flushed to disk by other
		 * threads.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}

		goto again;
	}
	return 0;
}
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}
static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}
static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}
static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}
static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}
static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		enum req_opf op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
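
/*
 * Mapping sketch (illustrative): with nr_cpu_ids = 8 and nr_queues = 4 the
 * divisor is (8 + 4 - 1) / 4 = 2, so CPUs 0-1 map to queue 0, CPUs 2-3 to
 * queue 1, and so on; e.g. CPU 5 / 2 = queue 2.
 */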
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}
static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}
*nullb
)
1406 if (nullb
->dev
->discard
== false)
1408 nullb
->q
->limits
.discard_granularity
= nullb
->dev
->blocksize
;
1409 nullb
->q
->limits
.discard_alignment
= nullb
->dev
->blocksize
;
1410 blk_queue_max_discard_sectors(nullb
->q
, UINT_MAX
>> 9);
1411 blk_queue_flag_set(QUEUE_FLAG_DISCARD
, nullb
->q
);
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
	.report_zones	= null_zone_report,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
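
/*
 * Sizing sketch (illustrative): tag_map holds one unsigned long per
 * BITS_PER_LONG tags, so the default hw_queue_depth of 64 needs a single
 * 64-bit word: ALIGN(64, 64) / 64 = 1.
 */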
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0)
			return ret;
	}

	add_disk(disk);
	return 0;
}
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}
static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}
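
/*
 * Fix-up sketch (illustrative): a configured blocksize of 700 is first
 * rounded down to 512 and then clamped into [512, 4096]; a blocksize of
 * 100 rounds down to 0 and clamps up to 512. Likewise any mbps limit is
 * dropped for NULL_Q_BIO devices because a bio-based queue cannot be
 * stopped for throttling.
 */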
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}
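
/*
 * Fault-injection sketch (illustrative): the timeout/requeue module
 * parameters take the standard fault_attr string
 * "<interval>,<probability>,<space>,<times>", e.g.
 *
 *   modprobe null_blk timeout="1,100,0,-1"
 *
 * to time out every request (100% probability, unlimited times).
 */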
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("null_blk: invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}
static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");