/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};
struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};
struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};
struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
	int status;
};
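
/*
 * Illustrative note: nbd_cmd is never allocated directly.  It lives in the
 * per-request PDU that blk-mq reserves because the tag set is created with
 * .cmd_size = sizeof(struct nbd_cmd) (see nbd_dev_add()).  A minimal sketch
 * of the mapping, assuming a request "req":
 *
 *	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);	// rq == req
 */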
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}
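
/*
 * Worked example (illustrative): with blocksize = 4096 and nr_blocks =
 * 262144, bytesize becomes 4096 * 262144 = 1 GiB, and nbd_size_update()
 * advertises bytesize >> 9 = 2097152 sectors of 512 bytes to the block
 * layer.
 */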
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = -EIO;
		return BLK_EH_HANDLED;
	}

	/* If we are waiting on our dead timer then we could get timeout
	 * callbacks for our request.  For this we just want to reset the timer
	 * and let the queue side take care of everything.
	 */
	if (!completion_done(&cmd->send_complete)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	}
	dev_err_ratelimited(nbd_to_dev(nbd),
			    "Connection timed out\n");

	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = -EIO;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}
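
/*
 * Summary of the timer verdicts used above (illustrative): BLK_EH_HANDLED
 * means this driver completed the request itself, BLK_EH_RESET_TIMER re-arms
 * the request timer, and BLK_EH_NOT_HANDLED tells the block layer that no
 * completion is needed here because the request was requeued to another
 * connection.
 */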
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	current_restore_flags(pflags, PF_MEMALLOC);

	return result;
}
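
/*
 * Usage note (illustrative): callers that can be interrupted mid-transfer
 * pass a "sent" counter so that a partial send can be resumed later, e.g.:
 *
 *	int sent = nsock->sent;
 *	result = sock_xmit(nbd, index, 1, &from, MSG_MORE, &sent);
 *	// on -ERESTARTSYS, store nsock->sent = sent and retry the request
 */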
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);

			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
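
/*
 * For reference, the request built above is the classic NBD wire header:
 * 28 bytes, all multi-byte fields big-endian, laid out per <linux/nbd.h>
 * (the size is checked by the BUILD_BUG_ON() in nbd_init()):
 *
 *	__be32 magic;		// NBD_REQUEST_MAGIC
 *	__be32 type;		// NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];	// opaque cookie, echoed back in the reply
 *	__be64 from;		// byte offset on the export
 *	__be32 len;		// transfer length in bytes
 */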
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = -EIO;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = -EIO;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}
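
/*
 * Illustrative note on the handle: only the first four bytes of the 8-byte
 * NBD handle are used here, and they carry blk_mq_unique_tag(), which packs
 * the hardware queue number into the upper 16 bits and the per-queue tag
 * into the lower 16 bits, roughly:
 *
 *	u32 unique = (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
 *		     (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
 */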
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = -EIO;
	blk_mq_complete_request(req);
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_stop_hw_queues(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_start_hw_queues(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_interruptible_timeout(config->conn_wait,
					 atomic_read(&config->live_connections),
					 config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	cmd->status = 0;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first.  We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, trying another connection\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail.  In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);

	return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being set up by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}
*nbd
, unsigned long arg
)
884 struct nbd_config
*config
= nbd
->config
;
885 struct socket
*sock
, *old
;
886 struct recv_thread_args
*args
;
890 sock
= sockfd_lookup(arg
, &err
);
894 args
= kzalloc(sizeof(*args
), GFP_KERNEL
);
900 for (i
= 0; i
< config
->num_connections
; i
++) {
901 struct nbd_sock
*nsock
= config
->socks
[i
];
906 mutex_lock(&nsock
->tx_lock
);
908 mutex_unlock(&nsock
->tx_lock
);
911 sk_set_memalloc(sock
->sk
);
912 atomic_inc(&config
->recv_threads
);
913 refcount_inc(&nbd
->config_refs
);
915 nsock
->fallback_index
= -1;
918 INIT_WORK(&args
->work
, recv_work
);
922 mutex_unlock(&nsock
->tx_lock
);
925 /* We take the tx_mutex in an error path in the recv_work, so we
926 * need to queue_work outside of the tx_mutex.
928 queue_work(recv_workqueue
, &args
->work
);
930 atomic_inc(&config
->live_connections
);
931 wake_up(&config
->conn_wait
);
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->config = NULL;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
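
/*
 * Example (illustrative): a server advertising NBD_FLAG_HAS_FLAGS |
 * NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_TRIM yields a writable disk with a
 * write cache enabled (no FUA) and discard support turned on.
 */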
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &config->runtime_flags))
		send_disconnects(nbd);
	return 0;
}
static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd_reset(nbd);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	nbd_size_update(nbd);
	return error;
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
					 atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
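
/*
 * For illustration, the traditional nbd-client style setup over this ioctl
 * interface looks roughly like the userspace sketch below (error handling
 * omitted; "sock" is a connected TCP socket fd, "nbd" an open /dev/nbd0 fd):
 *
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd, NBD_SET_FLAGS, server_flags);
 *	ioctl(nbd, NBD_SET_SOCK, sock);
 *	ioctl(nbd, NBD_DO_IT);	// blocks until disconnect or error
 */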
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	nbd_config_put(nbd);
	nbd_put(nbd);
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}
static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}
static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}
static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	nbd_reset(nbd);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}
/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};
/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
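
/*
 * For illustration, a userspace connect request carrying the attributes
 * parsed above might be built with libnl roughly as follows (sketch only;
 * assumes a resolved generic netlink family id and a connected socket fd):
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0, 0,
 *		    NBD_CMD_CONNECT, 0);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size);
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_sync(sk, msg);
 */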
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = -EINVAL;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},
};
static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};
static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}
static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that userspace can know the maximum
		 * number of partitions the kernel is able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}
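
/*
 * Worked example (illustrative): loading with max_part=15 gives part_shift =
 * fls(15) = 4, so each device claims 1 << 4 = 16 minors (minor 0 for the
 * whole disk plus up to max_part = 15 partitions), and at most
 * 1UL << (MINORBITS - 4) = 65536 devices fit in the minor space.
 */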
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");