/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
};
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3
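
/*
 * The NBD_* values above are bit numbers in nbd_device->runtime_flags;
 * they are only ever manipulated with the atomic set_bit()/test_bit()
 * family below, never stored as plain values.
 */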
struct nbd_device {
        u32 flags;
        unsigned long runtime_flags;
        struct nbd_sock **socks;
        int magic;

        struct blk_mq_tag_set tag_set;

        struct mutex config_lock;
        struct gendisk *disk;
        int num_connections;
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
        loff_t blksize;
        loff_t bytesize;

        struct task_struct *task_recv;
        struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};
struct nbd_cmd {
        struct nbd_device *nbd;
        struct completion send_complete;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}
static bool nbd_is_connected(struct nbd_device *nbd)
{
        return !!nbd->task_recv;
}
static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
        bdev->bd_inode->i_size = 0;
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

        return 0;
}
static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
        if (!nbd_is_connected(nbd))
                return;

        bdev->bd_inode->i_size = nbd->bytesize;
        set_capacity(nbd->disk, nbd->bytesize >> 9);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
                        loff_t blocksize, loff_t nr_blocks)
{
        int ret;

        ret = set_blocksize(bdev, blocksize);
        if (ret)
                return ret;

        nbd->blksize = blocksize;
        nbd->bytesize = blocksize * nr_blocks;

        nbd_size_update(nbd, bdev);

        return 0;
}
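
/*
 * Note: the three size ioctls below (NBD_SET_BLKSIZE, NBD_SET_SIZE and
 * NBD_SET_SIZE_BLOCKS) all funnel into nbd_size_set(), differing only in
 * which of the two operands (blocksize, nr_blocks) the caller supplies
 * directly; bytesize is always blocksize * nr_blocks.
 */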
static void nbd_end_request(struct nbd_cmd *cmd)
{
        struct nbd_device *nbd = cmd->nbd;
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int error = req->errors ? -EIO : 0;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
                error ? "failed" : "done");

        blk_mq_complete_request(req, error);
}
/*
 * Forcibly shut down the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        int i;

        if (nbd->num_connections == 0)
                return;

        if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
                return;

        for (i = 0; i < nbd->num_connections; i++) {
                struct nbd_sock *nsock = nbd->socks[i];

                mutex_lock(&nsock->tx_lock);
                kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
                mutex_unlock(&nsock->tx_lock);
        }
        dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                                 bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct nbd_device *nbd = cmd->nbd;

        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
        set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
        req->errors++;

        mutex_lock(&nbd->config_lock);
        sock_shutdown(nbd);
        mutex_unlock(&nbd->config_lock);
        return BLK_EH_HANDLED;
}
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
                     int size, int msg_flags)
{
        struct socket *sock = nbd->socks[index]->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted %s on closed socket in sock_xmit\n",
                                    (send ? "send" : "recv"));
                return -EINVAL;
        }

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}
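
/*
 * The PF_MEMALLOC / __GFP_MEMALLOC handling above matters because this
 * driver can sit underneath reclaim-driven writeback: allocations made
 * while pushing a write to the server must be able to dip into the
 * memory reserves and must not recurse into further I/O (GFP_NOIO),
 * otherwise the whole stack can deadlock waiting on itself.
 */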
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec, int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);

        result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
        u32 tag = blk_mq_unique_tag(req);

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                type = NBD_CMD_TRIM;
                break;
        case REQ_OP_FLUSH:
                type = NBD_CMD_FLUSH;
                break;
        case REQ_OP_WRITE:
                type = NBD_CMD_WRITE;
                break;
        case REQ_OP_READ:
                type = NBD_CMD_READ;
                break;
        default:
                return -EIO;
        }

        if (rq_data_dir(req) == WRITE &&
            (nbd->flags & NBD_FLAG_READ_ONLY)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Write on read-only\n");
                return -EIO;
        }

        memset(&request, 0, sizeof(request));
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &tag, sizeof(tag));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                cmd, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &request, sizeof(request),
                           (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Send control failed (result %d)\n", result);
                return -EIO;
        }

        if (type != NBD_CMD_WRITE)
                return 0;

        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
                struct bvec_iter iter;
                struct bio_vec bvec;

                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
                        int flags = is_last ? 0 : MSG_MORE;

                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                        /*
                         * The completion might already have come in,
                         * so break for the last one instead of letting
                         * the iterator do it. This prevents use-after-free
                         * of the bio.
                         */
                        if (is_last)
                                break;
                }
                bio = next;
        }
        return 0;
}
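
/*
 * On-the-wire recap (all header fields big-endian, matching struct
 * nbd_request in <linux/nbd.h>): a fixed-size control header carrying
 * magic, type, an opaque 8-byte handle (we stash the blk-mq unique tag
 * there), offset and length, followed - for writes only - by the payload
 * streamed straight out of the request's bios. MSG_MORE hints to TCP
 * that the header and payload should be coalesced into fewer segments.
 */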
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);

        result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
                           bvec->bv_len, MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
        int result;
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
        u16 hwq;
        u32 tag;

        reply.magic = 0;
        result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
                    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        dev_err(disk_to_dev(nbd->disk),
                                "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                        (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        memcpy(&tag, reply.handle, sizeof(u32));

        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                                       blk_mq_unique_tag_to_tag(tag));
        if (!req || !blk_mq_request_started(req)) {
                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
                        tag, req);
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return cmd;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, index, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return cmd;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                cmd, bvec.bv_len);
                }
        } else {
                /* See the comment in nbd_queue_rq. */
                wait_for_completion(&cmd->send_complete);
        }
        return cmd;
}
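
/*
 * Reply matching works because the server echoes our handle back
 * verbatim: the u32 copied out of reply.handle is the blk-mq unique tag,
 * which encodes both the hardware queue (upper bits, recovered via
 * blk_mq_unique_tag_to_hwq()) and the per-queue tag, so the original
 * request can be looked up without any driver-private tracking.
 */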
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO },
        .show = pid_show,
};
struct recv_thread_args {
        struct work_struct work;
        struct nbd_device *nbd;
        int index;
};
static void recv_work(struct work_struct *work)
{
        struct recv_thread_args *args = container_of(work,
                                                     struct recv_thread_args,
                                                     work);
        struct nbd_device *nbd = args->nbd;
        struct nbd_cmd *cmd;
        int ret = 0;

        BUG_ON(nbd->magic != NBD_MAGIC);
        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
                if (IS_ERR(cmd)) {
                        ret = PTR_ERR(cmd);
                        break;
                }

                nbd_end_request(cmd);
        }

        /*
         * We got an error, shut everybody down if this wasn't the result of a
         * disconnect request.
         */
        if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                sock_shutdown(nbd);
        atomic_dec(&nbd->recv_threads);
        wake_up(&nbd->recv_wq);
}
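
/*
 * One recv_work instance runs per connection, each looping on its own
 * socket until nbd_read_stat() fails. The recv_threads count together
 * with recv_wq is what lets NBD_DO_IT below sleep until every receiver
 * has torn down before it cleans up the device.
 */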
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
        struct nbd_cmd *cmd;

        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
        req->errors++;
        nbd_end_request(cmd);
}
static void nbd_clear_que(struct nbd_device *nbd)
{
        BUG_ON(nbd->magic != NBD_MAGIC);

        blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_sock *nsock;

        if (index >= nbd->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
                goto error_out;
        }

        if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        req->errors = 0;

        nsock = nbd->socks[index];
        mutex_lock(&nsock->tx_lock);
        if (unlikely(!nsock->sock)) {
                mutex_unlock(&nsock->tx_lock);
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        if (nbd_send_cmd(nbd, cmd, index) != 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed\n");
                req->errors++;
                nbd_end_request(cmd);
        }

        mutex_unlock(&nsock->tx_lock);

        return;

error_out:
        req->errors++;
        nbd_end_request(cmd);
}
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        /*
         * Since we look at the bios to send the request over the network we
         * need to make sure the completion work doesn't mark this request done
         * before we are done doing our send. This keeps us from dereferencing
         * freed data if we have particularly fast completions (i.e. we get the
         * completion before we exit sock_xmit on the last bvec) or in the case
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
        init_completion(&cmd->send_complete);
        blk_mq_start_request(bd->rq);
        nbd_handle_cmd(cmd, hctx->queue_num);
        complete(&cmd->send_complete);

        return BLK_MQ_RQ_QUEUE_OK;
}
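
/*
 * Note that ->queue_rq does blocking socket I/O directly (nbd_handle_cmd
 * takes a mutex and calls into sock_xmit); this is only legal because
 * the tag set in nbd_dev_add() below is created with BLK_MQ_F_BLOCKING,
 * which tells blk-mq to invoke us from a context that may sleep.
 */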
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
        struct nbd_sock **socks;
        struct nbd_sock *nsock;

        if (!nbd->task_setup)
                nbd->task_setup = current;
        if (nbd->task_setup != current) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
                return -EINVAL;
        }

        socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks)
                return -ENOMEM;
        nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
        if (!nsock)
                return -ENOMEM;

        nbd->socks = socks;

        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
        socks[nbd->num_connections++] = nsock;

        return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
        int i;

        for (i = 0; i < nbd->num_connections; i++)
                kfree(nbd->socks[i]);
        kfree(nbd->socks);
        nbd->socks = NULL;
        nbd->runtime_flags = 0;
        set_capacity(nbd->disk, 0);
        nbd->tag_set.timeout = 0;
        nbd->num_connections = 0;
        nbd->task_setup = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
static void nbd_bdev_reset(struct block_device *bdev)
{
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
                blkdev_reread_part(bdev);
                bdev->bd_invalidated = 1;
        }
}
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
        if (nbd->flags & NBD_FLAG_READ_ONLY)
                set_device_ro(bdev, true);
        if (nbd->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                blk_queue_write_cache(nbd->disk->queue, true, false);
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
        struct nbd_request request = {};
        int i, ret;

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(NBD_CMD_DISC);

        for (i = 0; i < nbd->num_connections; i++) {
                ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
        }
}
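
/*
 * NBD_CMD_DISC is fire-and-forget: the protocol defines no reply for it,
 * so the only "acknowledgement" we ever see is the server closing the
 * connection, which surfaces in the receive path as an error with
 * NBD_DISCONNECT_REQUESTED already set.
 */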
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->socks)
                        return -EINVAL;

                mutex_unlock(&nbd->config_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->config_lock);

                /* Check again after getting mutex back. */
                if (!nbd->socks)
                        return -EINVAL;

                if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
                                      &nbd->runtime_flags))
                        send_disconnects(nbd);
                return 0;
        }

        case NBD_CLEAR_SOCK:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);
                /*
                 * We want to give the run thread a chance to wait for everybody
                 * to clean up and then do its own cleanup.
                 */
                if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
                        int i;

                        for (i = 0; i < nbd->num_connections; i++)
                                kfree(nbd->socks[i]);
                        kfree(nbd->socks);
                        nbd->socks = NULL;
                        nbd->num_connections = 0;
                        nbd->task_setup = NULL;
                }
                return 0;

        case NBD_SET_SOCK: {
                int err;
                struct socket *sock = sockfd_lookup(arg, &err);

                if (!sock)
                        return err;

                err = nbd_add_socket(nbd, sock);
                if (!err && max_part)
                        bdev->bd_invalidated = 1;

                return err;
        }

        case NBD_SET_BLKSIZE: {
                loff_t bsize = div_s64(nbd->bytesize, arg);

                return nbd_size_set(nbd, bdev, arg, bsize);
        }

        case NBD_SET_SIZE:
                return nbd_size_set(nbd, bdev, nbd->blksize,
                                    div_s64(arg, nbd->blksize));

        case NBD_SET_SIZE_BLOCKS:
                return nbd_size_set(nbd, bdev, nbd->blksize, arg);

        case NBD_SET_TIMEOUT:
                nbd->tag_set.timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_DO_IT: {
                struct recv_thread_args *args;
                int num_connections = nbd->num_connections;
                int error = 0, i;

                if (nbd->task_recv)
                        return -EBUSY;
                if (!nbd->socks)
                        return -EINVAL;
                if (num_connections > 1 &&
                    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
                        dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
                        error = -EINVAL;
                        goto out_err;
                }

                set_bit(NBD_RUNNING, &nbd->runtime_flags);
                blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
                args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
                if (!args) {
                        error = -ENOMEM;
                        goto out_err;
                }
                nbd->task_recv = current;
                mutex_unlock(&nbd->config_lock);

                nbd_parse_flags(nbd, bdev);

                error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
                if (error) {
                        dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                        goto out_recv;
                }

                nbd_size_update(nbd, bdev);

                nbd_dev_dbg_init(nbd);
                for (i = 0; i < num_connections; i++) {
                        sk_set_memalloc(nbd->socks[i]->sock->sk);
                        atomic_inc(&nbd->recv_threads);
                        INIT_WORK(&args[i].work, recv_work);
                        args[i].nbd = nbd;
                        args[i].index = i;
                        queue_work(recv_workqueue, &args[i].work);
                }
                wait_event_interruptible(nbd->recv_wq,
                                         atomic_read(&nbd->recv_threads) == 0);
                for (i = 0; i < num_connections; i++)
                        flush_work(&args[i].work);
                nbd_dev_dbg_close(nbd);
                nbd_size_clear(nbd, bdev);
                device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
                mutex_lock(&nbd->config_lock);
                nbd->task_recv = NULL;
out_err:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);

                /* user requested, ignore socket errors */
                if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        error = 0;
                if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
                        error = -ETIMEDOUT;

                nbd_reset(nbd);
                return error;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                /*
                 * For compatibility only, we no longer keep a list of
                 * outstanding requests.
                 */
                return 0;
        }
        return -ENOTTY;
}
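
/*
 * For orientation, a minimal user-space sketch of how this ioctl
 * interface is typically driven (an nbd-client-style setup; hypothetical
 * example, not part of the driver - error handling is elided, and "sock"
 * is assumed to be a connected, already-negotiated TCP socket fd):
 *
 *	int fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(fd, NBD_SET_TIMEOUT, 30);
 *	ioctl(fd, NBD_SET_SOCK, sock);
 *	ioctl(fd, NBD_DO_IT);	(blocks until disconnect or error)
 */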
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->config_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->config_lock);

        return error;
}
static const struct block_device_operations nbd_fops =
{
        .owner =	THIS_MODULE,
        .ioctl =	nbd_ioctl,
        .compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;

        if (nbd->task_recv)
                seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

        return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
        .open = nbd_dbg_tasks_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;
        u32 flags = nbd->flags;

        seq_printf(s, "Hex: 0x%08x\n\n", flags);

        seq_puts(s, "Known flags:\n");

        if (flags & NBD_FLAG_HAS_FLAGS)
                seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
        if (flags & NBD_FLAG_READ_ONLY)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

        return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
        .open = nbd_dbg_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        struct dentry *dir;

        if (!nbd_dbg_dir)
                return -EIO;

        dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
        if (!dir) {
                dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
                        nbd_name(nbd));
                return -EIO;
        }
        nbd->dbg_dir = dir;

        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
        debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
        debugfs_remove_recursive(nbd->dbg_dir);
}
static int nbd_dbg_init(void)
{
        struct dentry *dbg_dir;

        dbg_dir = debugfs_create_dir("nbd", NULL);
        if (!dbg_dir)
                return -EIO;

        nbd_dbg_dir = dbg_dir;

        return 0;
}

static void nbd_dbg_close(void)
{
        debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
        return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(void *data, struct request *rq,
                            unsigned int hctx_idx, unsigned int request_idx,
                            unsigned int numa_node)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->nbd = data;
        return 0;
}
static struct blk_mq_ops nbd_mq_ops = {
        .queue_rq	= nbd_queue_rq,
        .init_request	= nbd_init_request,
        .timeout	= nbd_xmit_timeout,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
        struct gendisk *disk = nbd->disk;

        if (disk) {
                del_gendisk(disk);
                blk_cleanup_queue(disk->queue);
                blk_mq_free_tag_set(&nbd->tag_set);
                put_disk(disk);
        }
        kfree(nbd);
}
static int nbd_dev_add(int index)
{
        struct nbd_device *nbd;
        struct gendisk *disk;
        struct request_queue *q;
        int err = -ENOMEM;

        nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
        if (!nbd)
                goto out;

        disk = alloc_disk(1 << part_shift);
        if (!disk)
                goto out_free_nbd;

        if (index >= 0) {
                err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
                                GFP_KERNEL);
                if (err == -ENOSPC)
                        err = -EEXIST;
        } else {
                err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
                if (err >= 0)
                        index = err;
        }
        if (err < 0)
                goto out_free_disk;

        nbd->disk = disk;
        nbd->tag_set.ops = &nbd_mq_ops;
        nbd->tag_set.nr_hw_queues = 1;
        nbd->tag_set.queue_depth = 128;
        nbd->tag_set.numa_node = NUMA_NO_NODE;
        nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
        nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
        nbd->tag_set.driver_data = nbd;

        err = blk_mq_alloc_tag_set(&nbd->tag_set);
        if (err)
                goto out_free_idr;

        q = blk_mq_init_queue(&nbd->tag_set);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_free_tags;
        }
        disk->queue = q;

        /*
         * Tell the block layer that we are not a rotational device
         */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
        disk->queue->limits.discard_granularity = 512;
        blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
        disk->queue->limits.discard_zeroes_data = 0;
        blk_queue_max_hw_sectors(disk->queue, 65536);
        disk->queue->limits.max_sectors = 256;

        nbd->magic = NBD_MAGIC;
        mutex_init(&nbd->config_lock);
        disk->major = NBD_MAJOR;
        disk->first_minor = index << part_shift;
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
        init_waitqueue_head(&nbd->recv_wq);
        nbd_reset(nbd);
        add_disk(disk);
        return index;

out_free_tags:
        blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
        idr_remove(&nbd_index_idr, index);
out_free_disk:
        put_disk(disk);
out_free_nbd:
        kfree(nbd);
out:
        return err;
}
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int i;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that the user can know the maximum number
                 * of partitions the kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
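
                /*
                 * Worked example: max_part = 10 gives part_shift =
                 * fls(10) = 4, so each device spans 1 << 4 = 16 minors
                 * and max_part is rounded up to (1 << 4) - 1 = 15 usable
                 * partitions.
                 */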
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
        recv_workqueue = alloc_workqueue("knbd-recv",
                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!recv_workqueue)
                return -ENOMEM;

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                destroy_workqueue(recv_workqueue);
                return -EIO;
        }

        nbd_dbg_init();

        mutex_lock(&nbd_index_mutex);
        for (i = 0; i < nbds_max; i++)
                nbd_dev_add(i);
        mutex_unlock(&nbd_index_mutex);
        return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
        struct nbd_device *nbd = ptr;

        nbd_dev_remove(nbd);
        return 0;
}
static void __exit nbd_cleanup(void)
{
        nbd_dbg_close();

        idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
        idr_destroy(&nbd_index_idr);
        destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");