/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

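/*
 * Per-socket state. 'pending' and 'sent' track a partially transmitted
 * request so it can be resumed on the same socket, 'cookie' lets a
 * timed-out command detect that the socket was replaced while it was
 * in flight, and 'fallback_index' caches the last live connection we
 * fell back to after this one died.
 */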
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

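/* Bit numbers for config->runtime_flags, used with set_bit()/test_bit(). */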
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

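/*
 * Lifetime is managed by two refcounts: 'refs' pins the device itself
 * (gendisk, tag set), while 'config_refs' pins the current configuration
 * and tears it down in nbd_config_put() when the last user drops it.
 */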
struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
	blk_status_t status;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing all in-flight receivers to
 * error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

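/*
 * Request timeout handler. With more than one connection the request is
 * requeued so the submit path can move it to a live socket; with a single
 * connection the sockets are shut down and the request fails outright.
 */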
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}

/*
 * Send or receive a packet. Runs with PF_MEMALLOC set so that socket
 * memory allocations cannot recurse into reclaim (and thus back into
 * block I/O) while we are on an I/O path.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 nbd_cmd_flags = 0;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* If we did a partial send previously and got at least the whole
	 * request struct out, skip straight to sending the rest of the
	 * pages; otherwise resume the header from where we left off.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* Returns an ERR_PTR on failure, so the caller can inform userspace. */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

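/*
 * One receiver work item runs per connection, completing requests as
 * replies arrive until the socket errors out or the device disconnects.
 */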
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

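/*
 * Pick a live connection to retry on after the socket at 'index' died;
 * returns a socket index, or a negative value if none is available.
 */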
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_timeout(config->conn_wait,
			   atomic_read(&config->live_connections),
			   config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}

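/*
 * Submit one command on the given connection, falling back to another
 * live socket (or waiting up to dead_conn_timeout for a reconnect) when
 * the chosen one is dead.
 */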
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		blk_mq_requeue_request(req, true);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg can fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

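/*
 * Swap a fresh socket into the first dead connection slot and start a
 * new receiver for it; returns -ENOSPC if no dead connection needed
 * replacing. Used by the netlink reconfigure path.
 */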
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of it.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);
	}
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

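/*
 * Rough sketch of how userspace traditionally drives the ioctl interface
 * (illustrative only, not part of the driver; error handling omitted):
 *
 *	int fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_SOCK, sock_fd);	/. connected socket fd ./
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(fd, NBD_DO_IT);			/. blocks until disconnect ./
 */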
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}

static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops = {
	.owner		= THIS_MODULE,
	.open		= nbd_open,
	.release	= nbd_release,
	.ioctl		= nbd_ioctl,
	.compat_ioctl	= nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		= { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			= { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },
};

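/*
 * Connect handler. The sockets arrive as a nested attribute list;
 * roughly (sketch of the layout, assuming a single connection):
 *
 *	NBD_ATTR_INDEX       (u32, optional - pick any free device if absent)
 *	NBD_ATTR_SIZE_BYTES  (u64)
 *	NBD_ATTR_SOCKETS
 *	    NBD_SOCK_ITEM
 *	        NBD_SOCK_FD  (u32)
 */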
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = -EINVAL;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};

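/* Emit one NBD_DEVICE_ITEM (index + connected flag) into a status reply. */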
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is that we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

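/*
 * Module init: validate the module parameters, register the block major
 * and the generic netlink family, then create nbds_max devices up front
 * so the /dev/nbd* nodes exist immediately.
 */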
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk. For example, max_part=16 gives
		 * part_shift=5 and an effective max_part of 31.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");