/* drivers/block/null_blk.c, as of "nullb: factor disk parameters" */

#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;
	struct nullb_device *dev;

	struct nullb_cmd *cmds;
};

struct nullb_device {
	struct nullb *nullb;

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	bool use_lightnvm; /* register as a LightNVM device */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
};

struct nullb {
	struct nullb_device *dev;
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set *tag_set;
	struct blk_mq_tag_set __tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE = 0,
	NULL_IRQ_SOFTIRQ = 1,
	NULL_IRQ_TIMER = 2,
};

enum {
	NULL_Q_BIO = 0,
	NULL_Q_RQ = 1,
	NULL_Q_MQ = 2,
};

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set = null_set_queue_mode,
	.get = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_use_lightnvm;
module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set = null_set_irqmode,
	.get = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

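/*
 * Example module load (illustrative values, using the parameters above):
 *
 *	modprobe null_blk nr_devices=2 queue_mode=2 gb=4 bs=4096 \
 *		irqmode=2 completion_nsec=50000
 *
 * creates /dev/nullb0 and /dev/nullb1 as 4 GB blk-mq devices that complete
 * each request from an hrtimer after 50 usec.
 */
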
static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->use_lightnvm = g_use_lightnvm;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	kfree(dev);
}

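/*
 * Tag handling for the bio and legacy request paths: each queue has a
 * command array and a bitmap with one bit per command. get_tag() scans for
 * a clear bit and claims it with test_and_set_bit_lock(), retrying if it
 * loses the race; put_tag() releases the bit and wakes a waiter blocked in
 * alloc_cmd().
 */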
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

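/*
 * Complete one command on whichever interface issued it. For the legacy
 * request path this may also restart a queue that null_rq_prep_fn() stopped
 * when it ran out of tags, now that a tag has been freed.
 */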
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, BLK_STS_OK);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, BLK_STS_OK);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

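/*
 * Map the submitting CPU to a queue by carving the CPU id space into
 * nr_queues chunks of equal size: e.g. with nr_cpu_ids == 8 and
 * nr_queues == 4, CPUs 0-1 use queue 0, CPUs 2-3 use queue 1, and so on.
 */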
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

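/*
 * blk-mq path: the nullb_cmd lives in the request's driver payload
 * (cmd_size in the tag set below), so no tag bitmap lookup is needed.
 */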
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_STS_OK;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq = null_queue_rq,
	.complete = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
	rqd->error = status ? -EIO : 0;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	struct nullb *nullb = dev->q->queuedata;
	sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8; /* convert size to blocks (256 pages per block) */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = nullb->dev->blocksize;
	grp->csecs = nullb->dev->blocksize;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = nullb->dev->hw_queue_depth;

	return 0;
}

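/*
 * Worked example for the geometry computed in null_lnvm_id() above, for the
 * default 250 GB (g_gb = 250) device with the mandatory 4096-byte LightNVM
 * block size: 65,536,000 pages, >> 8 gives 256,000 blocks of 256 pages;
 * num_lun = (256,000 >> 16) + 1 = 4, so each LUN holds 64,000 blocks, and
 * 4 * 64,000 * 256 * 4096 bytes = 250 GB.
 */
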
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity = null_lnvm_id,
	.submit_io = null_lnvm_submit_io,

	.create_dma_pool = null_lnvm_create_dma_pool,
	.destroy_dma_pool = null_lnvm_destroy_dma_pool,
	.dev_dma_alloc = null_lnvm_dev_dma_alloc,
	.dev_dma_free = null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect = 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	list_del_init(&nullb->list);

	if (dev->use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	if (!dev->use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
	dev->nullb = NULL;
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner = THIS_MODULE,
	.open = null_open,
	.release = null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

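/*
 * Allocate one queue's command array plus its tag bitmap, sized in whole
 * longs: e.g. a queue depth of 64 needs a single long on a 64-bit machine.
 */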
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(nullb->dev->submit_queues *
		sizeof(struct nullb_queue), GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

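/*
 * Expose the device as /dev/nullb<index>. The capacity is dev->size
 * megabytes, converted here to 512-byte sectors (size in bytes >> 9).
 */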
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = NULL;

	if (nullb ? nullb->dev->blocking : g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

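/*
 * A device either points at the module-wide tag_set (shared_tags) or embeds
 * a private one in nullb->__tag_set; null_del_dev() and the error paths
 * below only free the private kind.
 */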
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (dev->use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

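/*
 * Module init: clamp and sanity-check the global parameters, register the
 * block major, then build nr_devices devices from those defaults.
 */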
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_use_lightnvm && g_bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		g_bs = 4096;
	}

	if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		g_queue_mode = NULL_Q_MQ;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_tagset;
	}

	if (g_use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");