/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

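/*
 * Protocol sketch (for orientation only; the nbd userland documentation
 * is authoritative): for every block request the driver sends a
 * fixed-size, big-endian struct nbd_request header over the socket,
 * followed by the payload for writes. The server answers with a struct
 * nbd_reply carrying back the same opaque handle, followed by the
 * payload for reads. The handle is how replies are matched to
 * in-flight requests.
 */
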
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

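/*
 * nbd_size_set() below recomputes the device size from a block size and
 * a block count; the NBD_SET_BLKSIZE/NBD_SET_SIZE/NBD_SET_SIZE_BLOCKS
 * ioctls are all funnelled through it. Note that set_capacity() counts
 * 512-byte sectors, hence the ">> 9" in nbd_size_update() above while
 * bytesize itself stays in bytes.
 */
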
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			loff_t blocksize, loff_t nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

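/*
 * A note on the flag juggling in sock_xmit() above: nbd can sit below a
 * filesystem, so this socket traffic may be the only way to make
 * progress under memory pressure. PF_MEMALLOC and __GFP_MEMALLOC let
 * the allocations dip into the emergency reserves instead of
 * deadlocking in reclaim, and MSG_NOSIGNAL keeps a dead peer from
 * raising SIGPIPE at us.
 */
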
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

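/*
 * For reference, the control header sent above is struct nbd_request
 * from <linux/nbd.h>: a 28-byte big-endian structure (see the
 * BUILD_BUG_ON in nbd_init()) consisting of magic, type, an opaque
 * 8-byte handle (we stash the 32-bit blk-mq unique tag there), the byte
 * offset and the byte length. The server must echo the handle back
 * verbatim in its reply.
 */
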
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

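/*
 * Reply matching, in short: nbd_send_cmd() stored the blk-mq unique tag
 * in the request handle, the server echoed it back, and nbd_read_stat()
 * above decodes it with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() to find the original request. A handle
 * that does not resolve to a started request is treated as a protocol
 * error (the -ENOENT case above).
 */
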
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

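/*
 * nbd_queue_rq() may block (socket sends under tx_lock), which is legal
 * here only because the tag set is created with BLK_MQ_F_BLOCKING in
 * nbd_init() below.
 */
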
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	return 0;
}

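/*
 * Each connection carries its own tx_lock, so commands can be sent on
 * different sockets concurrently; nbd_handle_cmd() and sock_shutdown()
 * both take the per-socket lock so a shutdown cannot interleave with a
 * half-sent request.
 */
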
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	int i;

	for (i = 0; i < nbd->num_connections; i++)
		kfree(nbd->socks[i]);
	kfree(nbd->socks);
	nbd->socks = NULL;
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	nbd->num_connections = 0;
	nbd->task_setup = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->socks)
			return -EINVAL;

		mutex_unlock(&nbd->config_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->config_lock);

		/* Check again after getting mutex back. */
		if (!nbd->socks)
			return -EINVAL;

		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
				      &nbd->runtime_flags))
			send_disconnects(nbd);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);
		/*
		 * We want to give the run thread a chance to wait for everybody
		 * to clean up and then do its own cleanup.
		 */
		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
			int i;

			for (i = 0; i < nbd->num_connections; i++)
				kfree(nbd->socks[i]);
			kfree(nbd->socks);
			nbd->socks = NULL;
			nbd->num_connections = 0;
			nbd->task_setup = NULL;
		}
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_add_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    div_s64(arg, nbd->blksize));

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct recv_thread_args *args;
		int num_connections = nbd->num_connections;
		int error = 0, i;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->socks)
			return -EINVAL;
		if (num_connections > 1 &&
		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
			error = -EINVAL;
			goto out_err;
		}

		set_bit(NBD_RUNNING, &nbd->runtime_flags);
		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
		if (!args) {
			error = -ENOMEM;
			goto out_err;
		}
		nbd->task_recv = current;
		mutex_unlock(&nbd->config_lock);

		nbd_parse_flags(nbd, bdev);

		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		if (error) {
			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
			goto out_recv;
		}

		nbd_size_update(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		for (i = 0; i < num_connections; i++) {
			sk_set_memalloc(nbd->socks[i]->sock->sk);
			atomic_inc(&nbd->recv_threads);
			INIT_WORK(&args[i].work, recv_work);
			args[i].nbd = nbd;
			args[i].index = i;
			queue_work(system_long_wq, &args[i].work);
		}
		wait_event_interruptible(nbd->recv_wq,
					 atomic_read(&nbd->recv_threads) == 0);
		for (i = 0; i < num_connections; i++)
			flush_work(&args[i].work);
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd, bdev);
		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
		mutex_lock(&nbd->config_lock);
		nbd->task_recv = NULL;
out_err:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

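/*
 * Typical userspace setup sequence (an illustrative sketch only; in
 * practice nbd-client does this, and error handling is omitted):
 *
 *	int sk = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(sk, ...);                    // to an nbd server
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_SOCK, sk);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_DO_IT);               // blocks until disconnect
 */
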
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct request_queue *q;
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (IS_ERR(q)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}
		disk->queue = q;

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		mutex_init(&nbd_dev[i].config_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		init_waitqueue_head(&nbd_dev[i].recv_wq);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
