// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};
struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6
#define NBD_RT_HAS_BACKEND_FILE		7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1
struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	unsigned int blksize_bits;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};
static inline unsigned int nbd_blksize(struct nbd_config *config)
{
	return 1u << config->blksize_bits;
}
struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct work_struct remove_work;

	struct list_head list;
	struct task_struct *task_setup;

	unsigned long flags;
	pid_t pid; /* pid of nbd-client, if attached */

	char *backend;
};
#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE_BITS 10

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
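
/*
 * Example of the handle layout (editor's illustration, not part of the
 * original source): with NBD_COOKIE_BITS = 32, a command whose
 * cmd_cookie is 3 and whose unique tag is 0x00050007 goes on the wire
 * as
 *
 *	handle = (3ULL << 32) | 0x00050007 = 0x0000000300050007
 *
 * nbd_handle_to_tag() recovers the low 32 bits (the tag used to look
 * the request back up) and nbd_handle_to_cookie() the high 32 bits,
 * which lets a reply be matched to its request and stale replies from
 * a previous incarnation of the command be detected.
 */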
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", nbd->pid);
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};
static ssize_t backend_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%s\n", nbd->backend ?: "");
}

static const struct device_attribute backend_attr = {
	.attr = { .name = "backend", .mode = 0444},
	.show = backend_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	del_gendisk(disk);
	blk_cleanup_disk(disk);
	blk_mq_free_tag_set(&nbd->tag_set);

	/*
	 * Remove from idr after del_gendisk() completes, so if the same ID is
	 * reused, the following add_disk() will succeed.
	 */
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, nbd->index);
	mutex_unlock(&nbd_index_mutex);
	destroy_workqueue(nbd->recv_workq);
	kfree(nbd);
}

static void nbd_dev_remove_work(struct work_struct *work)
{
	nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}
static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;

	/* Call del_gendisk() asynchronously to prevent deadlock */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;

		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			loff_t blksize)
{
	if (!blksize)
		blksize = 1u << NBD_DEF_BLKSIZE_BITS;
	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
		return -EINVAL;

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		nbd->disk->queue->limits.discard_alignment = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or until dead timeout.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}
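
/*
 * Editor's note on the allocation flags above (not part of the original
 * source): the socket is forced to GFP_NOIO | __GFP_MEMALLOC and the
 * whole transfer runs under memalloc_noreclaim_save() so that sending
 * block I/O over the network can never recurse into memory reclaim -
 * reclaim could itself issue I/O to this very device and deadlock,
 * which matters most when nbd backs swap or other reclaim-critical
 * storage.
 */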
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
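
/*
 * Editor's summary of the partial-send protocol above (not in the
 * original source): if sendmsg() is interrupted by a signal after some
 * bytes have gone out, the request is parked in nsock->pending with the
 * running byte count in nsock->sent, and the command is requeued with
 * BLK_STS_RESOURCE. On the next submission nbd_send_cmd() compares
 * nsock->sent against sizeof(request) to decide whether to resend the
 * tail of the header or jump straight into the data pages, so the
 * server never sees a duplicated or torn request.
 */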
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
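
/*
 * Editor's note (not in the original source): fallback_index caches the
 * last known-good replacement connection per socket, so the common case
 * of retrying onto the same healthy link costs O(1); the linear scan
 * over config->socks[] only runs when the cached index is absent or has
 * died as well.
 */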
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queue'ed waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * queue.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	set_capacity(bdev->bd_disk, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->pid = 0;
		if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
				       &config->runtime_flags)) {
			device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
			kfree(nbd->backend);
			nbd->backend = NULL;
		}
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->pid)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->pid = task_pid_nr(current);

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If num_connections is m (2 < m),
			 * and NO.1 ~ NO.n(1 < n < m) kzallocs are successful.
			 * But NO.(n + 1) failed. We still have n recv threads.
			 * So, add flush_workqueue here to prevent recv threads
			 * dropping the last config_refs and trying to destroy
			 * the workqueue from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	loff_t bytesize;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		return nbd_set_size(nbd, config->bytesize, arg);
	case NBD_SET_SIZE:
		return nbd_set_size(nbd, arg, nbd_blksize(config));
	case NBD_SET_SIZE_BLOCKS:
		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
			return -EINVAL;
		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
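
/*
 * Editor's example for NBD_SET_SIZE_BLOCKS (not in the original
 * source): with the default blksize_bits = 10 (1 KiB blocks), an
 * argument of 1048576 blocks yields bytesize = 1048576 << 10 = 1 GiB;
 * check_shl_overflow() rejects arguments whose shifted value no longer
 * fits in loff_t instead of silently wrapping.
 */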
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	} else if (nbd_disconnected(nbd->config)) {
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			disk->part0->bd_openers == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->pid)
		seq_printf(s, "recv: %d\n", nbd->pid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

	return 0;
}
static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}
static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
	nbd->backend = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	mutex_lock(&nbd_index_mutex);
	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	nbd->index = index;
	mutex_unlock(&nbd_index_mutex);
	if (err < 0)
		goto out_free_tags;

	disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;

	nbd->recv_workq = alloc_workqueue("nbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		err = -ENOMEM;
		goto out_err_disk;
	}

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	/*
	 * Start out with a zero reference count to keep other threads from
	 * using this device until it is fully initialized.
	 */
	refcount_set(&nbd->refs, 0);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;

	/* Too big first_minor can cause duplicate creation of
	 * sysfs files/links, since index << part_shift might overflow, or
	 * MKDEV() expect that the max bits of first_minor is 20.
	 */
	disk->first_minor = index << part_shift;
	if (disk->first_minor < index || disk->first_minor > MINORMASK) {
		err = -EINVAL;
		goto out_free_work;
	}

	disk->minors = 1 << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	err = add_disk(disk);
	if (err)
		goto out_free_work;

	/*
	 * Now publish the device.
	 */
	refcount_set(&nbd->refs, refs);
	nbd_total_devices++;
	return nbd;

out_free_work:
	destroy_workqueue(nbd->recv_workq);
out_err_disk:
	blk_cleanup_disk(disk);
out_free_idr:
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
out:
	return ERR_PTR(err);
}
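
/*
 * Editor's note (not in the original source): the tag set is created
 * with BLK_MQ_F_BLOCKING because nbd_queue_rq() sleeps - it takes
 * mutexes and ultimately calls sendmsg() - so the block layer must
 * invoke it from a context where blocking is allowed rather than from
 * softirq context.
 */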
static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}
/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED },
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED },
	[NBD_ATTR_BACKEND_IDENTIFIER]	=	{ .type = NLA_STRING },
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("nbd: device at index %d is going down\n",
					index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("nbd: failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config. If we already have
			 * DESTROY_ON_DISCONNECT set then we know we don't have
			 * that extra ref already held so we don't need the
			 * put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
	if (ret)
		goto out;
	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure recv thread has finished, so we can safely call
	 * nbd_clear_que() to cancel the inflight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match with %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.small_ops	= nbd_connect_genl_ops,
	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racey, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}
, void *ptr
, void *data
)
2292 struct nbd_device
*nbd
= ptr
;
2293 return populate_nbd_status(nbd
, (struct sk_buff
*)data
);
2296 static int nbd_genl_status(struct sk_buff
*skb
, struct genl_info
*info
)
2298 struct nlattr
*dev_list
;
2299 struct sk_buff
*reply
;
2305 if (info
->attrs
[NBD_ATTR_INDEX
])
2306 index
= nla_get_u32(info
->attrs
[NBD_ATTR_INDEX
]);
2308 mutex_lock(&nbd_index_mutex
);
2310 msg_size
= nla_total_size(nla_attr_size(sizeof(u32
)) +
2311 nla_attr_size(sizeof(u8
)));
2312 msg_size
*= (index
== -1) ? nbd_total_devices
: 1;
2314 reply
= genlmsg_new(msg_size
, GFP_KERNEL
);
2317 reply_head
= genlmsg_put_reply(reply
, info
, &nbd_genl_family
, 0,
2324 dev_list
= nla_nest_start_noflag(reply
, NBD_ATTR_DEVICE_LIST
);
2332 ret
= idr_for_each(&nbd_index_idr
, &status_cb
, reply
);
2338 struct nbd_device
*nbd
;
2339 nbd
= idr_find(&nbd_index_idr
, index
);
2341 ret
= populate_nbd_status(nbd
, reply
);
2348 nla_nest_end(reply
, dev_list
);
2349 genlmsg_end(reply
, reply_head
);
2350 ret
= genlmsg_reply(reply
, info
);
2352 mutex_unlock(&nbd_index_mutex
);
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}
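
/*
 * Worked example for the minor-number layout above (editor's note, not
 * in the original source): with the default max_part = 16,
 * part_shift = fls(16) = 5 and max_part becomes (1 << 5) - 1 = 31, so
 * each device reserves 32 minors (the whole disk plus 31 partitions)
 * and nbds_max may not exceed 1 << (MINORBITS - 5) = 32768 devices.
 */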
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip nbd that is being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");