/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing yet. It seems to work, but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
	} while (msg_data_left(&msg));

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 1, &from, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

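/*
 * Reply path: nbd_read_stat() reads one nbd_reply, maps the echoed handle
 * back to a blk-mq request (hardware queue number plus tag), and for reads
 * pulls the payload into the request's bvecs. It is called in a loop from
 * recv_work() below.
 */
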
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
			  unsigned long arg)
{
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	if (max_part)
		bdev->bd_invalidated = 1;
	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < nbd->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!nbd->socks)
		return -EINVAL;

	mutex_unlock(&nbd->config_lock);
	fsync_bdev(bdev);
	mutex_lock(&nbd->config_lock);

	/* Check again after getting mutex back. */
	if (!nbd->socks)
		return -EINVAL;

	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &nbd->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	/*
	 * We want to give the run thread a chance to wait for everybody
	 * to clean up and then do its own cleanup.
	 */
	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
	    nbd->num_connections) {
		int i;

		for (i = 0; i < nbd->num_connections; i++) {
			sockfd_put(nbd->socks[i]->sock);
			kfree(nbd->socks[i]);
		}
		kfree(nbd->socks);
		nbd->socks = NULL;
		nbd->num_connections = 0;
	}
	nbd->task_setup = NULL;

	return 0;
}

static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
	struct recv_thread_args *args;
	int num_connections = nbd->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!nbd->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		error = -EINVAL;
		goto out_err;
	}

	set_bit(NBD_RUNNING, &nbd->runtime_flags);
	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
	if (!args) {
		error = -ENOMEM;
		goto out_err;
	}
	nbd->task_recv = current;
	mutex_unlock(&nbd->config_lock);

	nbd_parse_flags(nbd, bdev);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		goto out_recv;
	}

	nbd_size_update(nbd, bdev);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		sk_set_memalloc(nbd->socks[i]->sock->sk);
		atomic_inc(&nbd->recv_threads);
		INIT_WORK(&args[i].work, recv_work);
		args[i].nbd = nbd;
		args[i].index = i;
		queue_work(recv_workqueue, &args[i].work);
	}
	wait_event_interruptible(nbd->recv_wq,
				 atomic_read(&nbd->recv_threads) == 0);
	for (i = 0; i < num_connections; i++)
		flush_work(&args[i].work);
	nbd_dev_dbg_close(nbd);
	nbd_size_clear(nbd, bdev);
	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
	mutex_lock(&nbd->config_lock);
	nbd->task_recv = NULL;
out_err:
	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
	nbd_clear_sock(nbd, bdev);

	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		error = 0;
	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
		error = -ETIMEDOUT;

	nbd_reset(nbd);
	return error;
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd, bdev);
	case NBD_CLEAR_SOCK:
		return nbd_clear_sock(nbd, bdev);
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, bdev, arg);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, bdev, arg,
			     div_s64(nbd->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

*bdev
, fmode_t mode
,
809 unsigned int cmd
, unsigned long arg
)
811 struct nbd_device
*nbd
= bdev
->bd_disk
->private_data
;
814 if (!capable(CAP_SYS_ADMIN
))
817 BUG_ON(nbd
->magic
!= NBD_MAGIC
);
819 mutex_lock(&nbd
->config_lock
);
820 error
= __nbd_ioctl(bdev
, nbd
, cmd
, arg
);
821 mutex_unlock(&nbd
->config_lock
);
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

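/*
 * Each request's driver payload is a struct nbd_cmd (see tag_set.cmd_size
 * in nbd_dev_add() below), so blk_mq_rq_to_pdu() and blk_mq_rq_from_pdu()
 * convert between requests and nbd commands without extra allocations.
 */
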
static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	nbd->magic = 0;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	disk->queue->limits.discard_zeroes_data = 0;
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	nbd->magic = NBD_MAGIC;
	mutex_init(&nbd->config_lock);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	init_waitqueue_head(&nbd->recv_wq);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");