/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6
#define NBD_DISCONNECT_ON_CLOSE		7
struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};
#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	}
	config = nbd->config;

	if (!mutex_trylock(&cmd->lock)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	}

	dev_err_ratelimited(nbd_to_dev(nbd),
			    "Connection timed out\n");

	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);

			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	}
out:
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_timeout(config->conn_wait,
			   atomic_read(&config->live_connections),
			   config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	config->socks = socks;

	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}
static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		if (nbd->recv_workq)
			destroy_workqueue(nbd->recv_workq);
		nbd->recv_workq = NULL;

		nbd->tag_set.timeout = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		return -ENOMEM;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	nbd_size_update(nbd);
	return error;
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
					 atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static bool nbd_is_valid_blksize(unsigned long blksize)
{
	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
	    blksize > PAGE_SIZE)
		return false;
	return true;
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		if (!arg)
			arg = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(arg))
			return -EINVAL;
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = NBD_DEF_BLKSIZE;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		bdev->bd_invalidated = 1;
	} else if (nbd_disconnected(nbd->config)) {
		bdev->bd_invalidated = 1;
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	struct block_device *bdev = bdget_disk(disk, 0);

	if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			bdev->bd_openers == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}
static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}
/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		if (!bsize)
			bsize = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(bsize)) {
			ret = -EINVAL;
			goto out;
		}
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	/*
	 * Make sure recv thread has finished, so it does not drop the last
	 * config ref and try to destroy the workqueue from inside the work
	 * queue.
	 */
	flush_workqueue(nbd->recv_workq);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_DISCONNECT_ON_CLOSE,
					&config->runtime_flags);
		} else {
			clear_bit(NBD_DISCONNECT_ON_CLOSE,
					&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
)
2204 struct sk_buff
*skb
;
2208 skb
= genlmsg_new(nla_total_size(sizeof(u32
)), GFP_KERNEL
);
2211 msg_head
= genlmsg_put(skb
, 0, 0, &nbd_genl_family
, 0,
2217 ret
= nla_put_u32(skb
, NBD_ATTR_INDEX
, index
);
2222 genlmsg_end(skb
, msg_head
);
2223 genlmsg_multicast(&nbd_genl_family
, skb
, 0, 0, GFP_KERNEL
);
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user space can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");