/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
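
/*
 * A worked example of the throttling granularity (derived from the
 * definitions above): with TICKS_PER_SEC = 50 the bandwidth budget is
 * refilled every 20ms, so a device capped at mbps = 100 receives
 * (1 << 20) / 50 * 100 bytes, i.e. roughly 2MB, per tick.
 */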

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct __call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	blk_status_t error;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. See
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
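
/*
 * A sketch of the bitmap layout, assuming a 4KB PAGE_SIZE and the fixed
 * 512-byte sector size above: MAP_SZ is (4096 >> 9) + 2 = 10, bits 0-7
 * record which sectors of the page hold valid data, bit 8 (NULLB_PAGE_FREE)
 * marks a freed cache page, and bit 9 (NULLB_PAGE_LOCK) marks a cache page
 * currently being flushed; null_page_empty() deliberately scans only the
 * low MAP_SZ - 2 data bits.
 */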

struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;
	struct badblocks badblocks;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if discard is supported */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	unsigned int queue_depth;
	atomic_long_t cur_bytes;
	struct hrtimer bw_timer;
	unsigned long cache_flush_pos;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, S_IRUGO);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

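/*
 * Example (illustrative): the parameters above configure devices created at
 * module load, e.g.
 *
 *	modprobe null_blk nr_devices=2 queue_mode=2 gb=4 bs=4096
 *
 * creates /dev/nullb0 and /dev/nullb1 as 4GB blk-mq devices with a
 * 4096-byte block size. Devices created later through configfs inherit
 * these values as their defaults (see null_alloc_dev() below).
 */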

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

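/*
 * For example, NULLB_DEVICE_ATTR(size, ulong) below expands to
 * nullb_device_size_show()/nullb_device_size_store() plus a configfs
 * attribute, so "size" appears as a writable file in the device's configfs
 * directory; once the device is configured (NULLB_DEV_FL_CONFIGURED),
 * stores fail with -EBUSY.
 */
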
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		mutex_lock(&lock);
		dev->power = newp;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
		clear_bit(NULLB_DEV_FL_UP, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

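/*
 * A minimal usage sketch, assuming configfs is mounted at
 * /sys/kernel/config:
 *
 *	mkdir /sys/kernel/config/nullb/mydev
 *	echo 1024 > /sys/kernel/config/nullb/mydev/size
 *	echo 1 > /sys/kernel/config/nullb/mydev/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/mydev/power
 *
 * Writing 1 to "power" instantiates the block device via null_add_dev();
 * writing 0 tears it down again with null_del_dev(). The resulting disk
 * shows up as /dev/nullb<index>, independent of the directory name.
 */
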
static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

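/*
 * Syntax accepted by the store above: "+<start>-<end>" marks the inclusive
 * sector range [start, end] bad and "-<start>-<end>" clears it. For
 * example, "echo +0-7 > badblocks" makes I/O touching the first eight
 * sectors of the device fail with BLK_STS_IOERR.
 */
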
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_use_lightnvm,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
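
/*
 * Tag allocation is lock-free: find_first_zero_bit() proposes a free tag
 * and test_and_set_bit_lock() claims it, retrying if another submitter won
 * the race. put_tag() clears the bit and wakes anyone sleeping in
 * alloc_cmd() below.
 */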

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
	sector_t sector, bool ignore_cache)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}
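
/*
 * Note the locking pattern in null_insert_page(): it is entered with
 * nullb->lock held, drops the lock around the sleeping GFP_NOIO
 * allocations, then retakes it. Since another thread may have inserted the
 * same index meanwhile, null_radix_tree_insert() treats a failed insert as
 * success and returns the page already present in the tree.
 */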

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow the pages to be freed.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * The page is already being flushed to disk by another
		 * thread.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}
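
/*
 * In short: the write-back cache is bounded by dev->cache_size. Once
 * curr_cache + n would exceed that bound, cache pages are flushed to the
 * data tree in FREE_BATCH-sized rounds, with NULLB_PAGE_LOCK preventing two
 * threads from flushing the same page, until at least n bytes have been
 * reclaimed.
 */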

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio_op(bio) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			} else
				/* requeue request */
				return BLK_STS_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
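
/*
 * Throttling behaves like a token bucket: null_handle_cmd() debits
 * cur_bytes for each request and stops the queue once the budget goes
 * negative; every TIMER_INTERVAL the timer above resets the budget to
 * mb_per_tick() and restarts stopped queues. The timer parks itself
 * (HRTIMER_NORESTART) after a full tick in which no I/O touched the budget.
 */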

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
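
/*
 * The divisor is the ceiling of nr_cpu_ids / nr_queues, so CPUs map to
 * queues in contiguous blocks: with 8 possible CPUs and 2 queues, CPUs 0-3
 * submit to queue 0 and CPUs 4-7 to queue 1.
 */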

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8; /* convert size to pages per block */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = nullb->dev->blocksize;
	grp->csecs = nullb->dev->blocksize;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = nullb->dev->hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	if (dev->use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!dev->use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(nullb->dev->submit_queues *
		sizeof(struct nullb_queue), GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
	if (dev->use_lightnvm && dev->blocksize != 4096)
		dev->blocksize = 4096;

	if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
		dev->queue_mode = NULL_Q_MQ;

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}
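
/*
 * Example of the clamping above: a requested blocksize of 700 rounds down
 * to 512 and one of 8192 clamps to 4096; mbps is capped at 1024 * 40
 * (40GB/s); and bio-mode devices are never throttled, because a bio-based
 * queue cannot be stopped and restarted.
 */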

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
		blk_queue_flush_queueable(nullb->q, true);
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (dev->use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_use_lightnvm && g_bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		g_bs = 4096;
	}

	if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		g_queue_mode = NULL_Q_MQ;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	if (g_use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");