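/*
 * null_blk: a block device driver that completes I/O without touching
 * any backing storage, used to measure and stress the block layer
 * itself. Requests can be taken in through three interfaces (see the
 * queue_mode parameter): bio-based, the legacy request layer, or
 * blk-mq; the device can also register as a LightNVM device instead of
 * a plain gendisk.
 */
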
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool blocking;
module_param(blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
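
/*
 * Illustrative usage (not part of this file): all of the above are
 * load-time parameters, e.g. four blk-mq devices with 50us timer-based
 * completions:
 *
 *	modprobe null_blk queue_mode=2 irqmode=2 completion_nsec=50000 \
 *		nr_devices=4
 *
 * Every parameter is S_IRUGO, i.e. visible under
 * /sys/module/null_blk/parameters/ but not changeable after load.
 */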
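
/*
 * Tag handling for the bio and legacy request paths (blk-mq brings its
 * own tagging): tag_map is a per-queue bitmap managed with atomic bitops
 * only, and sleepers in alloc_cmd() are woken through nq->wait whenever
 * a tag is returned.
 */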
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

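/*
 * end_cmd() is the single completion point for all three queue modes. In
 * legacy request mode it also restarts the queue, which null_rq_prep_fn()
 * stops when it runs out of tags.
 */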
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

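/*
 * Map the submitting CPU to one of the queues by dividing the CPU id by
 * ceil(nr_cpu_ids / nr_queues), so that consecutive CPU ranges share a
 * queue. Only used for the bio and legacy paths; blk-mq does its own
 * ctx-to-hctx mapping.
 */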
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

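/*
 * Legacy (single-queue) request path. alloc_cmd() is called with
 * can_wait == 0 since ->prep_rq_fn may not sleep; when the tag space is
 * exhausted the queue is stopped and the request deferred until a
 * completion in end_cmd() restarts it.
 */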
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

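/*
 * blk-mq path: the nullb_cmd lives in the per-request PDU (cmd_size in
 * the tag set built by null_add_dev()), so the driver-private tag map is
 * not used here. When the "blocking" parameter sets BLK_MQ_F_BLOCKING,
 * ->queue_rq() is allowed to sleep, hence the might_sleep_if() check.
 */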
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

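/*
 * Optional LightNVM (open-channel SSD) front end: instead of exposing a
 * gendisk, the device registers with the LightNVM subsystem and reports
 * a synthetic geometry derived from the gb and bs parameters.
 */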
#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->error = error;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

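/*
 * Report a made-up identity (ver_id 0x1): one channel, 256 pages per
 * block, a single plane, and a 64-bit PPA format; the LUN/block split is
 * computed below from the configured capacity (gb) and block size (bs).
 */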
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert size to pgs per blk */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

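/*
 * Pre-allocate the whole command array and the tag bitmap for one queue;
 * tag_size is rounded up to whole longs to match the bitops used by
 * get_tag()/put_tag().
 */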
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

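/*
 * Create one nullb instance: allocate per-queue state, build a request
 * queue for the chosen queue_mode, and register the result either as a
 * LightNVM device or as a regular gendisk named nullb<index>.
 */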
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		if (blocking)
			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

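/*
 * Module init: sanitize bs, queue_mode and submit_queues against each
 * other, grab a block major, then instantiate nr_devices devices via
 * null_add_dev().
 */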
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");