/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
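/*
 * Usage sketch (assumption: this file is built as the usual null_blk
 * module; the parameter names below match the module_param definitions
 * in this file):
 *
 *	modprobe null_blk nr_devices=2 queue_mode=2 gb=250 bs=512
 *
 * Devices then appear as /dev/nullb0, /dev/nullb1, ... (see the
 * "nullb%d" disk_name format in null_add_dev()).
 */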
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

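/*
 * Convert a throttling limit in MB/s into the byte budget the bandwidth
 * timer hands out per tick: with TICKS_PER_SEC == 50, each tick's budget
 * is mbps * 2^20 / 50 bytes (see nullb_bwtimer_fn() and null_handle_cmd()).
 */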
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size; the bit for a block is
 *		indexed by the block's first sector within the page, so with
 *		a 512-byte block size, sector 8 uses bit 8.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

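/*
 * The badblocks attribute takes one sector range per write.  A minimal
 * usage sketch of the format accepted by the parser below:
 * "+START-END" marks sectors [START, END] bad, "-START-END" clears them,
 * e.g. "echo +0-1023 > badblocks" from the device's configfs directory.
 */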
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

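/*
 * Configfs usage sketch for the subsystem above (assumption: configfs is
 * mounted at /sys/kernel/config; the "nullb" directory name comes from
 * ci_namebuf, the per-device attributes from nullb_device_attrs):
 *
 *	mkdir /sys/kernel/config/nullb/mydev
 *	echo 4096 > /sys/kernel/config/nullb/mydev/blocksize
 *	echo 1 > /sys/kernel/config/nullb/mydev/power
 */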
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

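/*
 * Lightweight tag allocator for the bio-based (NULL_Q_BIO) path: each
 * queue carries a tag_map bitmap and a cmds array sized to queue_depth
 * (set up in setup_commands()), and waiters sleep on nq->wait until a
 * tag is released.
 */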
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

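/*
 * Look up the page backing @sector, allocating and inserting a new one
 * if needed.  The allocation path drops nullb->lock (hence the
 * __releases/__acquires annotations) so that the GFP_NOIO page
 * allocation and radix-tree preload do not happen under the spinlock.
 */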
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

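/*
 * Write one cache page back into the data radix tree.  The page was
 * marked with NULLB_PAGE_LOCK by null_make_cache_space(); if another
 * path marked it NULLB_PAGE_FREE in the meantime, it is discarded
 * instead of copied.
 */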
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could drop the lock before it is done with
	 * the c_pages.  To avoid a race, don't allow the pages to be freed
	 * while they are being flushed.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found a page that is being flushed to disk by another
		 * thread.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

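/*
 * Copy @n bytes from @source into the device, one block at a time.
 * With the write-back cache active, non-FUA writes land in the cache
 * radix tree (making room first via null_make_cache_space()); FUA
 * writes go straight to the data tree and invalidate any cached copy.
 */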
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		int op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode)  {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

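/*
 * Bandwidth-throttling timer: every TIMER_INTERVAL the budget in
 * nullb->cur_bytes is reset to mb_per_tick(mbps) and any queues stopped
 * by null_handle_cmd() are restarted.  If a full budget is still
 * unspent, the timer stops rearming itself until I/O resumes.
 */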
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

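/*
 * Map the submitting CPU to a queue for the bio path: CPU ids are
 * divided evenly across nr_queues, so CPU c uses queue
 * c / ceil(nr_cpu_ids / nr_queues).
 */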
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

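/*
 * Fault-injection hooks, compiled in with
 * CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION.  The "timeout" and "requeue"
 * module parameters are parsed by setup_fault_attr(); a sketch of the
 * generic fault-attr string (an assumption here, see
 * Documentation/fault-injection) is "<interval>,<probability>,<space>,<times>",
 * e.g. timeout=1,100,0,-1 to time out every request.
 */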
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq       = null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
	.report_zones =	null_zone_report,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0)
			return ret;
	}

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");