#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,

	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

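/*
 * Example usage (a sketch, not part of the original source; the module
 * name null_blk is assumed from the pr_warn() prefix below):
 *
 *	modprobe null_blk queue_mode=2 submit_queues=4 gb=4 bs=4096
 *
 * creates nr_devices disks named /dev/nullb0, /dev/nullb1, ... that
 * complete all I/O without touching any backing storage, with completions
 * signalled according to irqmode.
 */
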
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

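/*
 * Tags are handed out from a plain bitmap: find_first_zero_bit() picks a
 * candidate and test_and_set_bit_lock() claims it, retrying if another CPU
 * raced in between. put_tag() releases the bit and wakes anyone sleeping
 * for a free tag in alloc_cmd().
 */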
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

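/*
 * The bio path calls alloc_cmd(nq, 1) and may sleep until a tag frees up;
 * the request_fn prep handler calls alloc_cmd(nq, 0) and returns
 * BLKPREP_DEFER instead of blocking when no tag is available.
 */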
static void end_cmd(struct nullb_cmd *cmd)
{
	if (cmd->rq) {
		if (queue_mode == NULL_Q_MQ)
			blk_mq_end_io(cmd->rq, 0);
		else {
			INIT_LIST_HEAD(&cmd->rq->queuelist);
			blk_end_request_all(cmd->rq, 0);
		}
	} else if (cmd->bio)
		bio_endio(cmd->bio, 0);

	if (queue_mode != NULL_Q_MQ)
		free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			end_cmd(cmd);
			entry = entry->next;
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}

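/*
 * Timer-mode completions are batched: llist_add() returns true only when
 * the per-cpu list was empty, so the hrtimer is armed once per batch and
 * its handler drains every queued command completion_nsec later.
 */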
static void null_softirq_done_fn(struct request *rq)
{
	blk_end_request_all(rq, 0);
}

#ifdef CONFIG_SMP

static void null_ipi_cmd_end_io(void *data)
{
	struct completion_queue *cq;
	struct llist_node *entry, *next;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	entry = llist_del_all(&cq->list);

	while (entry) {
		next = entry->next;
		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
		end_cmd(cmd);
		entry = next;
	}
}

static void null_cmd_end_ipi(struct nullb_cmd *cmd)
{
	struct call_single_data *data = &cmd->csd;
	int cpu = get_cpu();
	struct completion_queue *cq = &per_cpu(completion_queues, cpu);

	cmd->ll_list.next = NULL;

	if (llist_add(&cmd->ll_list, &cq->list)) {
		data->func = null_ipi_cmd_end_io;
		data->flags = 0;
		__smp_call_function_single(cpu, data, 0);
	}

	put_cpu();
}

#endif /* CONFIG_SMP */

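/*
 * Completion dispatch: NULL_IRQ_NONE completes inline in the submit path,
 * NULL_IRQ_SOFTIRQ bounces completion through a self-IPI on the submitting
 * CPU (falling back to inline completion on !SMP builds), and
 * NULL_IRQ_TIMER defers completion to the per-cpu hrtimer above.
 */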
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_SOFTIRQ:
#ifdef CONFIG_SMP
		null_cmd_end_ipi(cmd);
#else
		end_cmd(cmd);
#endif
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

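/*
 * nullb_to_queue() spreads CPUs across queues in contiguous blocks of
 * DIV_ROUND_UP(nr_cpu_ids, nr_queues); e.g. with 8 possible CPUs and 2
 * queues, CPUs 0-3 map to queue 0 and CPUs 4-7 to queue 1.
 */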
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct nullb_cmd *cmd = rq->special;

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
	int tip = (reg->nr_hw_queues % nr_online_nodes);
	int node = 0, i, n;

	/*
	 * Split submit queues evenly wrt to the number of nodes. If uneven,
	 * fill the first buckets with one extra, until the rest is filled with
	 * no extra.
	 */
	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)
				b_size = reg->nr_hw_queues / nr_online_nodes;
		}
	}

	/*
	 * A node might not be online, therefore map the relative node id to the
	 * real node id.
	 */
	for_each_online_node(n) {
		if (!node)
			break;
		node--;
	}

	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}

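/*
 * Worked example of the split above (an illustration, not from the
 * original source): with nr_hw_queues = 5 and nr_online_nodes = 2, b_size
 * starts at 3 and tip at 1, so hctx 0-2 land on the first online node and
 * hctx 3-4 on the second.
 */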
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
{
	kfree(hctx);
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
};

static struct blk_mq_reg null_mq_reg = {
	.ops		= &null_mq_ops,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct nullb_cmd),
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

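/*
 * In NULL_Q_MQ mode the per-request nullb_cmd payload comes from the block
 * layer via cmd_size and is picked up in null_queue_rq() through
 * rq->special (an assumption about the early blk-mq contract, not stated
 * in this file); the driver's own cmds/tag_map arrays are only set up for
 * the bio and rq modes via init_driver_queues().
 */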
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_queue(nullb->q);
	else
		blk_cleanup_queue(nullb->q);
	put_disk(nullb->disk);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			goto err_queue;
		nullb->nr_queues++;
	}

	return 0;
err_queue:
	cleanup_queues(nullb);
	return ret;
}

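/*
 * Queue setup differs per mode: blk-mq initializes its hardware contexts
 * through null_init_hctx() as they are created, while the bio and rq modes
 * call init_driver_queues() to give each of the submit_queues software
 * queues its own command array and tag bitmap.
 */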
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;
		null_mq_reg.nr_hw_queues = submit_queues;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
		init_driver_queues(nullb);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		init_driver_queues(nullb);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
queue_fail:
		if (queue_mode == NULL_Q_MQ)
			blk_mq_free_queue(nullb->q);
		else
			blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
}

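/*
 * Capacity math above (observation, not a comment from the original
 * source): size = gb * 2^30 / bs units is passed to set_capacity(), which
 * takes 512-byte sectors. With the default bs = 512, gb = 250 yields
 * 524288000 sectors, i.e. 250 GiB; other block sizes scale the reported
 * capacity accordingly.
 */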
static int __init null_init(void)
{
	unsigned int i;

#if !defined(CONFIG_SMP)
	if (irqmode == NULL_IRQ_SOFTIRQ) {
		pr_warn("null_blk: softirq completions not available.\n");
		pr_warn("null_blk: using direct completions.\n");
		irqmode = NULL_IRQ_NONE;
	}
#endif

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");