/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
	struct request *pending;
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};
struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
	struct blk_mq_tag_set tag_set;

	refcount_t config_refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
	struct nbd_device *nbd;
	struct completion send_complete;
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;

		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;

	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}
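/*
 * Worked example of the sizing above (illustrative only): with
 * blocksize = 4096 and nr_blocks = 262144, bytesize becomes 1 GiB
 * (4096 * 262144 = 1073741824), and nbd_size_update() advertises
 * bytesize >> 9 = 2097152 512-byte sectors to the block layer.
 */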
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shut down the socket, causing all listeners to error.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 nbd_cmd_flags = 0;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);

			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
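/*
 * For reference, the control header nbd_send_cmd() puts on the wire is
 * the 28-byte struct nbd_request from <linux/nbd.h> (big-endian fields;
 * the size is asserted by the BUILD_BUG_ON in nbd_init()). A sketch of
 * the layout as the server sees it:
 *
 *	__be32 magic;      // NBD_REQUEST_MAGIC
 *	__be32 type;       // NBD_CMD_* | command flags
 *	char   handle[8];  // opaque; here the blk-mq unique tag
 *	__be64 from;       // byte offset, blk_rq_pos(req) << 9
 *	__be32 len;        // payload length in bytes
 */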
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}
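/*
 * Note how the two paths pair up: nbd_send_cmd() stores the 32-bit
 * blk-mq unique tag (hw queue index in the high bits, per-queue tag in
 * the low bits) in the first four bytes of the request handle, and
 * nbd_read_stat() recovers it from reply.handle to look the request
 * back up via blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag().
 */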
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;

		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_timeout(config->conn_wait,
			   atomic_read(&config->live_connections),
			   config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;

		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		blk_mq_requeue_request(req, true);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	complete(&cmd->send_complete);

	return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);
	}
}
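/*
 * Summary of the mapping above: FLUSH+FUA advertises a write-back
 * cache with FUA support, FLUSH alone a write-back cache that needs
 * explicit flushes, and neither flag a write-through device to which
 * the block layer never issues flushes.
 */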
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}
static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;

		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;

			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	nbd_size_update(nbd);
	return error;
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;
	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
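/*
 * Typical userspace setup through this legacy ioctl interface, as a
 * client like nbd-client does it (a sketch only; error handling is
 * omitted and "sockfd" is assumed to be a connected socket to the
 * server):
 *
 *	int fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_SOCK, sockfd);
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, nblocks);
 *	ioctl(fd, NBD_DO_IT);		// blocks until disconnect
 */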
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}
static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = set->driver_data;
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}
/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;

			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);

		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);

		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};
static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
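/*
 * Userspace reaches these ops by resolving NBD_GENL_FAMILY_NAME over
 * generic netlink and sending NBD_CMD_CONNECT with NBD_ATTR_INDEX,
 * NBD_ATTR_SIZE_BYTES and a nested NBD_ATTR_SOCKETS list of
 * NBD_SOCK_ITEM/NBD_SOCK_FD attributes. A sketch of the first step
 * using libnl-genl (assumed available; error handling omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);
 */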
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}
, void *ptr
, void *data
)
1954 struct nbd_device
*nbd
= ptr
;
1955 return populate_nbd_status(nbd
, (struct sk_buff
*)data
);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}
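/*
 * Worked example of the part_shift math in nbd_init() (illustrative
 * only): with the default max_part = 16, part_shift = fls(16) = 5, so
 * each device spans 1 << 5 = 32 minors (minor 0 being the whole disk)
 * and max_part is adjusted down to 31 addressable partitions.
 */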
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");