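/*
 * null_blk: a block device driver that accepts requests and completes
 * them without doing any data transfer, used for benchmarking the block
 * layer. It can run in bio-based, legacy request, or blk-mq mode, and
 * can optionally register as a LightNVM device instead of a disk.
 *
 * Typical usage (module parameters are defined below), for example:
 *	modprobe null_blk queue_mode=2 nr_devices=2 completion_nsec=10000
 */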
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

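/*
 * Simple per-queue bitmap tag allocator for the non-blk-mq paths:
 * get_tag() claims a free bit with an atomic test-and-set loop, and
 * put_tag() clears it and wakes any waiter blocked in alloc_cmd().
 */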
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

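/*
 * Returns NULL if no tag is free and can_wait is zero; otherwise sleeps
 * uninterruptibly until put_tag() releases a tag and wakes the queue.
 */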
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

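/*
 * Complete a command on whichever interface it arrived on, then release
 * its tag. Freeing a tag may allow a stopped legacy request queue (see
 * null_rq_prep_fn) to make progress again, so it is restarted here.
 */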
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

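/*
 * "Execute" a command according to irqmode: complete it inline, bounce
 * it through the softirq completion machinery, or arm an hrtimer so the
 * completion fires after completion_nsec to mimic device latency.
 */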
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

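/*
 * For the bio and legacy request paths, spread submitting CPUs evenly
 * over the available queues; blk-mq performs its own CPU-to-queue
 * mapping.
 */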
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

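/*
 * Legacy request-mode prep: without a free tag, stop the queue and
 * return BLKPREP_DEFER so the block layer retries later; end_cmd()
 * restarts the queue when a tag is freed.
 */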
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

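/*
 * blk-mq submission path. The nullb_cmd lives in the request PDU
 * (tag_set.cmd_size in null_add_dev), so no driver-side tag or
 * allocation is needed here.
 */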
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

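/*
 * Tear down a device in roughly the reverse order of null_add_dev():
 * unregister the LightNVM target or the gendisk, then free the queue,
 * the blk-mq tag set, the per-queue resources, and the nullb itself.
 */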
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

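/*
 * Optional LightNVM (open-channel SSD) emulation. With CONFIG_NVM and
 * use_lightnvm=1 the device registers with the LightNVM core instead of
 * exposing a gendisk; incoming nvm_rq commands are wrapped in requests
 * and pushed through the device's own queue.
 */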
#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	nvm_end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

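	/*
	 * Carve the configured capacity into a fake flash geometry: bytes
	 * are divided into bs-sized pages, pages are grouped 256 to a
	 * block, and the blocks are spread over LUNs (one LUN per 2^16
	 * blocks).
	 */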
	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert page count to block count, 256 pages per block */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

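/*
 * Allocate the per-queue command array and the tag bitmap backing
 * get_tag()/put_tag(). Only the bio and legacy request modes use these;
 * in blk-mq mode commands live in the request PDU and tags come from
 * the blk-mq tag set.
 */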
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

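/*
 * Create and register one null device. The request queue is built
 * according to queue_mode: blk-mq (with the command embedded in the
 * request PDU), bio-based, or the legacy request path with a driver
 * managed tag map. With use_lightnvm the device registers with the
 * LightNVM core instead of adding a gendisk.
 */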
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
							&null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

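	/*
	 * Sanity-check submit_queues: per-node hctx mode wants one
	 * submission queue per online node; otherwise clamp to the CPU
	 * count and make sure there is at least one queue.
	 */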
	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");