// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DESTROY_ON_DISCONNECT	6
#define NBD_RT_DISCONNECT_ON_CLOSE	7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1
struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct completion *destroy_complete;
	unsigned long flags;
};
#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
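/*
 * A minimal sketch of the handle layout built above (values are
 * hypothetical): the low 32 bits carry the blk-mq unique tag and the
 * high 32 bits carry cmd->cmd_cookie, which is bumped each time the
 * command is reissued, so a stale reply can be told apart from the
 * reply to the retransmitted request:
 *
 *	u64 handle = ((u64)3 << NBD_COOKIE_BITS) | 0x00010005;
 *
 *	nbd_handle_to_tag(handle);	(== 0x00010005)
 *	nbd_handle_to_cookie(handle);	(== 3)
 */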
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case  NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case  NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case  NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Do this at the very end, just before the nbd is freed, to make
	 * sure that the disk and the related kobject are also totally
	 * removed, to avoid duplicate creation of the same one.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static void nbd_size_update(struct nbd_device *nbd, bool start)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);
	sector_t nr_sectors = config->bytesize >> 9;

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, nr_sectors);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_nr_sectors(bdev, nr_sectors);
			if (start)
				set_blocksize(bdev, config->blksize);
		} else
			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd, false);
}
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or until the dead timeout
		 * fires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}
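/*
 * Note on the allocation flags above: sock_xmit() runs in the I/O
 * path, so sk_allocation is forced to GFP_NOIO | __GFP_MEMALLOC and
 * reclaim is disabled around the transfer. Otherwise a socket
 * allocation could recurse into memory reclaim, issue more I/O to
 * this very device and deadlock - the same class of problem as the
 * swap warning in the header comment.
 */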
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
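/*
 * For reference, the wire format produced above is a fixed-size,
 * big-endian struct nbd_request header:
 *
 *	magic  = htonl(NBD_REQUEST_MAGIC)
 *	type   = htonl(NBD_CMD_* | NBD_CMD_FLAG_*)
 *	handle = 64-bit tag/cookie pair from nbd_cmd_handle()
 *	from   = cpu_to_be64(byte offset)
 *	len    = htonl(byte count)
 *
 * For NBD_CMD_WRITE the payload pages follow immediately; the server
 * echoes the handle back in its reply so nbd_read_stat() below can
 * find the matching request.
 */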
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * queue.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;

put_socket:
	sockfd_put(sock);
	return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_nr_sectors(bdev, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		if (nbd->recv_workq)
			destroy_workqueue(nbd->recv_workq);
		nbd->recv_workq = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		return -ENOMEM;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If we have m connections (m > 2) and the first n
			 * kzalloc() calls succeeded (1 < n < m) but call n + 1
			 * failed, we still have n recv works running. Flush
			 * the workqueue here so those works cannot drop the
			 * last config ref and try to destroy the workqueue
			 * from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	nbd_size_update(nbd, true);
	return error;
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
					 atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static bool nbd_is_valid_blksize(unsigned long blksize)
{
	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
	    blksize > PAGE_SIZE)
		return false;
	return true;
}
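/*
 * In other words, the valid block sizes are the powers of two from 512
 * bytes up to PAGE_SIZE; on an architecture with 4 KiB pages that is
 * exactly 512, 1024, 2048 and 4096.
 */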
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		if (!arg)
			arg = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(arg))
			return -EINVAL;
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
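/*
 * A minimal userspace sketch (not part of this driver) of the ioctl
 * sequence handled above, assuming `sock` is the fd of a connected
 * socket on which the NBD handshake has already been performed:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, size_bytes / 4096);
 *	ioctl(dev, NBD_SET_SOCK, (unsigned long)sock);
 *	ioctl(dev, NBD_DO_IT);            (blocks until disconnect)
 *	ioctl(dev, NBD_CLEAR_SOCK, 0UL);
 */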
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = NBD_DEF_BLKSIZE;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	} else if (nbd_disconnected(nbd->config)) {
		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	struct block_device *bdev = bdget_disk(disk, 0);

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			bdev->bd_openers == 0)
		nbd_disconnect_and_put(nbd);
	bdput(bdev);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
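/*
 * nbd_queue_rq() can block (it takes cmd->lock and performs socket
 * I/O), which is why the tag set created in nbd_dev_add() below uses
 * BLK_MQ_F_BLOCKING alongside BLK_MQ_F_SHOULD_MERGE.
 */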
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	nbd->destroy_complete = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}
/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};
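/*
 * Putting the policies together, a connect request from userspace is
 * laid out roughly like this (sketch; attribute values hypothetical):
 *
 *	NBD_CMD_CONNECT
 *	    NBD_ATTR_INDEX            = 0
 *	    NBD_ATTR_SIZE_BYTES       = 1073741824
 *	    NBD_ATTR_BLOCK_SIZE_BYTES = 4096
 *	    NBD_ATTR_SOCKETS (nested)
 *	        NBD_SOCK_ITEM (nested)
 *	            NBD_SOCK_FD       = connected socket fd
 */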
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = config->blksize;
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		if (!bsize)
			bsize = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(bsize)) {
			printk(KERN_ERR "Invalid block size %llu\n", bsize);
			return -EINVAL;
		}
	}

	if (bytes != config->bytesize || bsize != config->blksize)
		nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
	return 0;
}
*skb
, struct genl_info
*info
)
1834 DECLARE_COMPLETION_ONSTACK(destroy_complete
);
1835 struct nbd_device
*nbd
= NULL
;
1836 struct nbd_config
*config
;
1839 bool put_dev
= false;
1841 if (!netlink_capable(skb
, CAP_SYS_ADMIN
))
1844 if (info
->attrs
[NBD_ATTR_INDEX
])
1845 index
= nla_get_u32(info
->attrs
[NBD_ATTR_INDEX
]);
1846 if (!info
->attrs
[NBD_ATTR_SOCKETS
]) {
1847 printk(KERN_ERR
"nbd: must specify at least one socket\n");
1850 if (!info
->attrs
[NBD_ATTR_SIZE_BYTES
]) {
1851 printk(KERN_ERR
"nbd: must specify a size in bytes for the device\n");
1855 mutex_lock(&nbd_index_mutex
);
1857 ret
= idr_for_each(&nbd_index_idr
, &find_free_cb
, &nbd
);
1860 new_index
= nbd_dev_add(-1);
1861 if (new_index
< 0) {
1862 mutex_unlock(&nbd_index_mutex
);
1863 printk(KERN_ERR
"nbd: failed to add new device\n");
1866 nbd
= idr_find(&nbd_index_idr
, new_index
);
1869 nbd
= idr_find(&nbd_index_idr
, index
);
1871 ret
= nbd_dev_add(index
);
1873 mutex_unlock(&nbd_index_mutex
);
1874 printk(KERN_ERR
"nbd: failed to add new device\n");
1877 nbd
= idr_find(&nbd_index_idr
, index
);
1881 printk(KERN_ERR
"nbd: couldn't find device at index %d\n",
1883 mutex_unlock(&nbd_index_mutex
);
1887 if (test_bit(NBD_DESTROY_ON_DISCONNECT
, &nbd
->flags
) &&
1888 test_bit(NBD_DISCONNECT_REQUESTED
, &nbd
->flags
)) {
1889 nbd
->destroy_complete
= &destroy_complete
;
1890 mutex_unlock(&nbd_index_mutex
);
1892 /* Wait untill the the nbd stuff is totally destroyed */
1893 wait_for_completion(&destroy_complete
);
1897 if (!refcount_inc_not_zero(&nbd
->refs
)) {
1898 mutex_unlock(&nbd_index_mutex
);
1901 printk(KERN_ERR
"nbd: device at index %d is going down\n",
1905 mutex_unlock(&nbd_index_mutex
);
1907 mutex_lock(&nbd
->config_lock
);
1908 if (refcount_read(&nbd
->config_refs
)) {
1909 mutex_unlock(&nbd
->config_lock
);
1913 printk(KERN_ERR
"nbd: nbd%d already in use\n", index
);
1916 if (WARN_ON(nbd
->config
)) {
1917 mutex_unlock(&nbd
->config_lock
);
1921 config
= nbd
->config
= nbd_alloc_config();
1923 mutex_unlock(&nbd
->config_lock
);
1925 printk(KERN_ERR
"nbd: couldn't allocate config\n");
1928 refcount_set(&nbd
->config_refs
, 1);
1929 set_bit(NBD_RT_BOUND
, &config
->runtime_flags
);
1931 ret
= nbd_genl_size_set(info
, nbd
);
1935 if (info
->attrs
[NBD_ATTR_TIMEOUT
])
1936 nbd_set_cmd_timeout(nbd
,
1937 nla_get_u64(info
->attrs
[NBD_ATTR_TIMEOUT
]));
1938 if (info
->attrs
[NBD_ATTR_DEAD_CONN_TIMEOUT
]) {
1939 config
->dead_conn_timeout
=
1940 nla_get_u64(info
->attrs
[NBD_ATTR_DEAD_CONN_TIMEOUT
]);
1941 config
->dead_conn_timeout
*= HZ
;
1943 if (info
->attrs
[NBD_ATTR_SERVER_FLAGS
])
1945 nla_get_u64(info
->attrs
[NBD_ATTR_SERVER_FLAGS
]);
1946 if (info
->attrs
[NBD_ATTR_CLIENT_FLAGS
]) {
1947 u64 flags
= nla_get_u64(info
->attrs
[NBD_ATTR_CLIENT_FLAGS
]);
1948 if (flags
& NBD_CFLAG_DESTROY_ON_DISCONNECT
) {
1949 set_bit(NBD_RT_DESTROY_ON_DISCONNECT
,
1950 &config
->runtime_flags
);
1951 set_bit(NBD_DESTROY_ON_DISCONNECT
, &nbd
->flags
);
1954 clear_bit(NBD_DESTROY_ON_DISCONNECT
, &nbd
->flags
);
1956 if (flags
& NBD_CFLAG_DISCONNECT_ON_CLOSE
) {
1957 set_bit(NBD_RT_DISCONNECT_ON_CLOSE
,
1958 &config
->runtime_flags
);
1962 if (info
->attrs
[NBD_ATTR_SOCKETS
]) {
1963 struct nlattr
*attr
;
1966 nla_for_each_nested(attr
, info
->attrs
[NBD_ATTR_SOCKETS
],
1968 struct nlattr
*socks
[NBD_SOCK_MAX
+1];
1970 if (nla_type(attr
) != NBD_SOCK_ITEM
) {
1971 printk(KERN_ERR
"nbd: socks must be embedded in a SOCK_ITEM attr\n");
1975 ret
= nla_parse_nested_deprecated(socks
, NBD_SOCK_MAX
,
1980 printk(KERN_ERR
"nbd: error processing sock list\n");
1984 if (!socks
[NBD_SOCK_FD
])
1986 fd
= (int)nla_get_u32(socks
[NBD_SOCK_FD
]);
1987 ret
= nbd_add_socket(nbd
, fd
, true);
1992 ret
= nbd_start_device(nbd
);
1994 mutex_unlock(&nbd
->config_lock
);
1996 set_bit(NBD_RT_HAS_CONFIG_REF
, &config
->runtime_flags
);
1997 refcount_inc(&nbd
->config_refs
);
1998 nbd_connect_reply(info
, nbd
->index
);
2000 nbd_config_put(nbd
);
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	nbd_clear_sock(nbd);
	mutex_unlock(&nbd->config_lock);
	/*
	 * Make sure recv thread has finished, so it does not drop the last
	 * config ref and try to destroy the workqueue from inside the work
	 * queue.
	 */
	flush_workqueue(nbd->recv_workq);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		} else {
			if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
					&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
					&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.small_ops	= nbd_connect_genl_ops,
	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");