/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
};

#define NBD_TIMEDOUT                    0
#define NBD_DISCONNECT_REQUESTED        1
#define NBD_DISCONNECTED                2
#define NBD_RUNNING                     3

struct nbd_device {
        u32 flags;
        unsigned long runtime_flags;
        struct nbd_sock **socks;
        int magic;

        struct blk_mq_tag_set tag_set;

        struct mutex config_lock;
        struct gendisk *disk;
        int num_connections;
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
        loff_t blksize;
        loff_t bytesize;

        struct task_struct *task_recv;
        struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
        struct nbd_device *nbd;
        struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

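/*
 * Command tracking: each request's nbd_cmd lives in the blk-mq
 * per-request PDU (see tag_set.cmd_size in nbd_init()), so the driver
 * keeps no list of outstanding requests.  The blk-mq unique tag, which
 * encodes both the hardware queue and the per-queue tag, is sent to the
 * server in request.handle and decoded again in nbd_read_stat() to map
 * a reply back to its originating request:
 *
 *	tag = blk_mq_unique_tag(req);            (send side)
 *	hwq = blk_mq_unique_tag_to_hwq(tag);     (receive side)
 *	req = blk_mq_tag_to_rq(tag_set.tags[hwq],
 *	                       blk_mq_unique_tag_to_tag(tag));
 */
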
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
        return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
        bdev->bd_inode->i_size = 0;
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

        return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
        if (!nbd_is_connected(nbd))
                return;

        bdev->bd_inode->i_size = nbd->bytesize;
        set_capacity(nbd->disk, nbd->bytesize >> 9);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
                        loff_t blocksize, loff_t nr_blocks)
{
        int ret;

        ret = set_blocksize(bdev, blocksize);
        if (ret)
                return ret;

        nbd->blksize = blocksize;
        nbd->bytesize = blocksize * nr_blocks;

        nbd_size_update(nbd, bdev);

        return 0;
}

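/*
 * Size bookkeeping example: bytesize is blksize * nr_blocks, while the
 * gendisk capacity is always counted in 512-byte sectors, hence the
 * ">> 9" above.  E.g. NBD_SET_BLKSIZE 4096 followed by
 * NBD_SET_SIZE_BLOCKS 262144 yields bytesize = 1 GiB and
 * set_capacity(disk, 2097152).
 */
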
static void nbd_end_request(struct nbd_cmd *cmd)
{
        struct nbd_device *nbd = cmd->nbd;
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int error = req->errors ? -EIO : 0;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
                error ? "failed" : "done");

        blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down the sockets, causing all outstanding senders and
 * receivers to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        int i;

        if (nbd->num_connections == 0)
                return;
        if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
                return;

        for (i = 0; i < nbd->num_connections; i++) {
                struct nbd_sock *nsock = nbd->socks[i];

                mutex_lock(&nsock->tx_lock);
                kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
                mutex_unlock(&nsock->tx_lock);
        }
        dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

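/*
 * kernel_sock_shutdown(SHUT_RDWR) makes any sender or receiver blocked
 * in sock_xmit() return an error rather than waiting forever; taking
 * tx_lock around it serializes the shutdown against in-flight sends.
 */
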
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                                 bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct nbd_device *nbd = cmd->nbd;

        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
        set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
        req->errors++;

        mutex_lock(&nbd->config_lock);
        sock_shutdown(nbd);
        mutex_unlock(&nbd->config_lock);
        return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
                     int size, int msg_flags)
{
        struct socket *sock = nbd->socks[index]->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted %s on closed socket in sock_xmit\n",
                                    (send ? "send" : "recv"));
                return -EINVAL;
        }

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}

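/*
 * The PF_MEMALLOC / __GFP_MEMALLOC dance above lets socket allocations
 * dip into emergency reserves, so an nbd device sitting under a
 * filesystem can still make progress under memory pressure (writing
 * out dirty pages requires network traffic here).  It does not make
 * swapping over nbd safe; see the warning at the top of this file.
 */
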
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec, int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);

        result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
        u32 tag = blk_mq_unique_tag(req);

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                type = NBD_CMD_TRIM;
                break;
        case REQ_OP_FLUSH:
                type = NBD_CMD_FLUSH;
                break;
        case REQ_OP_WRITE:
                type = NBD_CMD_WRITE;
                break;
        case REQ_OP_READ:
                type = NBD_CMD_READ;
                break;
        default:
                return -EIO;
        }

        if (rq_data_dir(req) == WRITE &&
            (nbd->flags & NBD_FLAG_READ_ONLY)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Write on read-only\n");
                return -EIO;
        }

        memset(&request, 0, sizeof(request));
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &tag, sizeof(tag));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                cmd, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &request, sizeof(request),
                           (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Send control failed (result %d)\n", result);
                return -EIO;
        }

        if (type != NBD_CMD_WRITE)
                return 0;

        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
                struct bvec_iter iter;
                struct bio_vec bvec;

                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
                        int flags = is_last ? 0 : MSG_MORE;

                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                        /*
                         * The completion might already have come in,
                         * so break for the last one instead of letting
                         * the iterator do it. This prevents use-after-free
                         * of the bio.
                         */
                        if (is_last)
                                break;
                }
                bio = next;
        }
        return 0;
}

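/*
 * On-the-wire request header, matching struct nbd_request (the
 * BUILD_BUG_ON in nbd_init() pins it at 28 bytes); multi-byte fields
 * are big-endian, per the NBD protocol:
 *
 *	magic:  4 bytes (NBD_REQUEST_MAGIC)
 *	type:   4 bytes (NBD_CMD_*)
 *	handle: 8 bytes (first 4 carry the blk-mq unique tag,
 *	                 echoed back in the reply)
 *	from:   8 bytes (byte offset)
 *	len:    4 bytes (byte count)
 *
 * For writes, the payload follows the header on the same connection;
 * MSG_MORE above hints to TCP that more data is coming so header and
 * payload can share segments.
 */
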
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);

        result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
                           bvec->bv_len, MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

/* An ERR_PTR return means something went wrong; inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
        int result;
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
        u16 hwq;
        u32 tag;

        reply.magic = 0;
        result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
                    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        dev_err(disk_to_dev(nbd->disk),
                                "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                        (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        memcpy(&tag, reply.handle, sizeof(u32));

        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                                       blk_mq_unique_tag_to_tag(tag));
        if (!req || !blk_mq_request_started(req)) {
                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
                        tag, req);
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return cmd;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, index, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return cmd;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                cmd, bvec.bv_len);
                }
        } else {
                /* See the comment in nbd_queue_rq. */
                wait_for_completion(&cmd->send_complete);
        }
        return cmd;
}

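/*
 * The reply header read above is 16 bytes, matching struct nbd_reply:
 *
 *	magic:  4 bytes (NBD_REPLY_MAGIC)
 *	error:  4 bytes (0 on success)
 *	handle: 8 bytes (echoed from the request; the first 4 bytes
 *	                 carry the blk-mq unique tag)
 *
 * Read payload, if any, follows the header on the same connection.
 */
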
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

struct recv_thread_args {
        struct work_struct work;
        struct nbd_device *nbd;
        int index;
};

static void recv_work(struct work_struct *work)
{
        struct recv_thread_args *args = container_of(work,
                                                     struct recv_thread_args,
                                                     work);
        struct nbd_device *nbd = args->nbd;
        struct nbd_cmd *cmd;
        int ret = 0;

        BUG_ON(nbd->magic != NBD_MAGIC);
        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
                if (IS_ERR(cmd)) {
                        ret = PTR_ERR(cmd);
                        break;
                }

                nbd_end_request(cmd);
        }

        /*
         * We got an error; shut everybody down unless it was the result
         * of a disconnect request.
         */
        if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                sock_shutdown(nbd);
        atomic_dec(&nbd->recv_threads);
        wake_up(&nbd->recv_wq);
}

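/*
 * One recv_work instance runs per connection (queued on system_long_wq
 * from NBD_DO_IT); each blocks in nbd_read_stat() until its socket
 * errors out or is shut down, then drops recv_threads so the ioctl can
 * finish once every receiver has exited.
 */
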
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
        struct nbd_cmd *cmd;

        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
        req->errors++;
        nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        BUG_ON(nbd->magic != NBD_MAGIC);

        blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_sock *nsock;

        if (index >= nbd->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
                goto error_out;
        }

        if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        req->errors = 0;

        nsock = nbd->socks[index];
        mutex_lock(&nsock->tx_lock);
        if (unlikely(!nsock->sock)) {
                mutex_unlock(&nsock->tx_lock);
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        if (nbd_send_cmd(nbd, cmd, index) != 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed\n");
                req->errors++;
                nbd_end_request(cmd);
        }

        mutex_unlock(&nsock->tx_lock);

        return;

error_out:
        req->errors++;
        nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        /*
         * Since we look at the bios to send the request over the network,
         * we need to make sure the completion work doesn't mark this request
         * done before we are done doing our send. This keeps us from
         * dereferencing freed data if we have particularly fast completions
         * (i.e. we get the completion before we exit sock_xmit on the last
         * bvec) or in the case that the server is misbehaving (or there was
         * an error) before we're done sending everything over the wire.
         */
        init_completion(&cmd->send_complete);
        blk_mq_start_request(bd->rq);
        nbd_handle_cmd(cmd, hctx->queue_num);
        complete(&cmd->send_complete);

        return BLK_MQ_RQ_QUEUE_OK;
}

static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
        struct nbd_sock **socks;
        struct nbd_sock *nsock;

        if (!nbd->task_setup)
                nbd->task_setup = current;
        if (nbd->task_setup != current) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
                return -EINVAL;
        }

        socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks)
                return -ENOMEM;
        nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
        if (!nsock)
                return -ENOMEM;

        nbd->socks = socks;

        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
        socks[nbd->num_connections++] = nsock;

        return 0;
}

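/*
 * Sockets can only be added by the task that started configuring the
 * device (task_setup), and the socks array simply grows by one entry
 * per NBD_SET_SOCK; nbd_reset() frees it again once the device is torn
 * down.
 */
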
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
        int i;

        for (i = 0; i < nbd->num_connections; i++)
                kfree(nbd->socks[i]);
        kfree(nbd->socks);
        nbd->socks = NULL;
        nbd->runtime_flags = 0;
        nbd->blksize = 1024;
        nbd->bytesize = 0;
        set_capacity(nbd->disk, 0);
        nbd->flags = 0;
        nbd->tag_set.timeout = 0;
        nbd->num_connections = 0;
        nbd->task_setup = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
                blkdev_reread_part(bdev);
                bdev->bd_invalidated = 1;
        }
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
        if (nbd->flags & NBD_FLAG_READ_ONLY)
                set_device_ro(bdev, true);
        if (nbd->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                blk_queue_write_cache(nbd->disk->queue, true, false);
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
        struct nbd_request request = {};
        int i, ret;

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(NBD_CMD_DISC);

        for (i = 0; i < nbd->num_connections; i++) {
                ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
        }
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->socks)
                        return -EINVAL;

                mutex_unlock(&nbd->config_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->config_lock);

                /* Check again after getting mutex back. */
                if (!nbd->socks)
                        return -EINVAL;

                if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
                                      &nbd->runtime_flags))
                        send_disconnects(nbd);
                return 0;
        }

        case NBD_CLEAR_SOCK:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);
                /*
                 * We want to give the run thread a chance to wait for
                 * everybody to clean up and then do its own cleanup.
                 */
                if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
                        int i;

                        for (i = 0; i < nbd->num_connections; i++)
                                kfree(nbd->socks[i]);
                        kfree(nbd->socks);
                        nbd->socks = NULL;
                        nbd->num_connections = 0;
                        nbd->task_setup = NULL;
                }
                return 0;

        case NBD_SET_SOCK: {
                int err;
                struct socket *sock = sockfd_lookup(arg, &err);

                if (!sock)
                        return err;

                err = nbd_add_socket(nbd, sock);
                if (!err && max_part)
                        bdev->bd_invalidated = 1;

                return err;
        }

        case NBD_SET_BLKSIZE: {
                loff_t bsize = div_s64(nbd->bytesize, arg);

                return nbd_size_set(nbd, bdev, arg, bsize);
        }

        case NBD_SET_SIZE:
                return nbd_size_set(nbd, bdev, nbd->blksize,
                                    div_s64(arg, nbd->blksize));

        case NBD_SET_SIZE_BLOCKS:
                return nbd_size_set(nbd, bdev, nbd->blksize, arg);

        case NBD_SET_TIMEOUT:
                nbd->tag_set.timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_DO_IT: {
                struct recv_thread_args *args;
                int num_connections = nbd->num_connections;
                int error = 0, i;

                if (nbd->task_recv)
                        return -EBUSY;
                if (!nbd->socks)
                        return -EINVAL;
                if (num_connections > 1 &&
                    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
                        dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
                        error = -EINVAL;
                        goto out_err;
                }

                set_bit(NBD_RUNNING, &nbd->runtime_flags);
                blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
                args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
                if (!args) {
                        error = -ENOMEM;
                        goto out_err;
                }
                nbd->task_recv = current;
                mutex_unlock(&nbd->config_lock);

                nbd_parse_flags(nbd, bdev);

                error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
                if (error) {
                        dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                        goto out_recv;
                }

                nbd_size_update(nbd, bdev);

                nbd_dev_dbg_init(nbd);
                for (i = 0; i < num_connections; i++) {
                        sk_set_memalloc(nbd->socks[i]->sock->sk);
                        atomic_inc(&nbd->recv_threads);
                        INIT_WORK(&args[i].work, recv_work);
                        args[i].nbd = nbd;
                        args[i].index = i;
                        queue_work(system_long_wq, &args[i].work);
                }
                wait_event_interruptible(nbd->recv_wq,
                                         atomic_read(&nbd->recv_threads) == 0);
                for (i = 0; i < num_connections; i++)
                        flush_work(&args[i].work);
                nbd_dev_dbg_close(nbd);
                nbd_size_clear(nbd, bdev);
                device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
                mutex_lock(&nbd->config_lock);
                nbd->task_recv = NULL;
out_err:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);

                /* user requested, ignore socket errors */
                if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        error = 0;
                if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
                        error = -ETIMEDOUT;

                nbd_reset(nbd);
                return error;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                /*
                 * For compatibility only, we no longer keep a list of
                 * outstanding requests.
                 */
                return 0;
        }
        return -ENOTTY;
}

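/*
 * A minimal userspace setup sequence, as a rough sketch (illustrative
 * only; error handling and the server connection are elided):
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	int sock = ...;                           connected TCP socket
 *	ioctl(dev, NBD_SET_SOCK, sock);           one or more times
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nblocks);
 *	ioctl(dev, NBD_DO_IT);                    blocks until disconnect
 *	ioctl(dev, NBD_CLEAR_QUE);
 *	ioctl(dev, NBD_CLEAR_SOCK);
 *
 * NBD_DO_IT does not return until the connection(s) die or
 * NBD_DISCONNECT is issued from another task.
 */
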
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->config_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->config_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;

        if (nbd->task_recv)
                seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

        return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
        .open = nbd_dbg_tasks_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;
        u32 flags = nbd->flags;

        seq_printf(s, "Hex: 0x%08x\n\n", flags);

        seq_puts(s, "Known flags:\n");

        if (flags & NBD_FLAG_HAS_FLAGS)
                seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
        if (flags & NBD_FLAG_READ_ONLY)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

        return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
        .open = nbd_dbg_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        struct dentry *dir;

        if (!nbd_dbg_dir)
                return -EIO;

        dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
        if (!dir) {
                dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
                        nbd_name(nbd));
                return -EIO;
        }
        nbd->dbg_dir = dir;

        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
        debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
        debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
        struct dentry *dbg_dir;

        dbg_dir = debugfs_create_dir("nbd", NULL);
        if (!dbg_dir)
                return -EIO;

        nbd_dbg_dir = dbg_dir;

        return 0;
}

static void nbd_dbg_close(void)
{
        debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
        return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

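/*
 * With debugfs mounted in the usual place, the files created above land
 * under /sys/kernel/debug/nbd/<disk_name>/, e.g. for nbd0:
 *
 *	tasks       pid of the receiver task
 *	size_bytes  current bytesize
 *	timeout     request timeout in jiffies (tag_set.timeout)
 *	blocksize   current block size
 *	flags       decoded server flags
 */
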
static int nbd_init_request(void *data, struct request *rq,
                            unsigned int hctx_idx, unsigned int request_idx,
                            unsigned int numa_node)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->nbd = data;
        return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
        .queue_rq       = nbd_queue_rq,
        .init_request   = nbd_init_request,
        .timeout        = nbd_xmit_timeout,
};

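/*
 * nbd_queue_rq() sleeps (mutexes, socket sends, the send_complete
 * completion), which is why nbd_init() sets BLK_MQ_F_BLOCKING on the
 * tag set: blk-mq then calls ->queue_rq from process context where
 * blocking is allowed.
 */
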
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partitions kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        for (i = 0; i < nbds_max; i++) {
                struct request_queue *q;
                struct gendisk *disk = alloc_disk(1 << part_shift);

                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;

                nbd_dev[i].tag_set.ops = &nbd_mq_ops;
                nbd_dev[i].tag_set.nr_hw_queues = 1;
                nbd_dev[i].tag_set.queue_depth = 128;
                nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
                nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
                nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                        BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
                nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

                err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
                if (err) {
                        put_disk(disk);
                        goto out;
                }

                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                q = blk_mq_init_queue(&nbd_dev[i].tag_set);
                if (IS_ERR(q)) {
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                        goto out;
                }
                disk->queue = q;

                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

        nbd_dbg_init();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;

                nbd_dev[i].magic = NBD_MAGIC;
                mutex_init(&nbd_dev[i].config_lock);
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                init_waitqueue_head(&nbd_dev[i].recv_wq);
                nbd_reset(&nbd_dev[i]);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}

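/*
 * Minor-number layout example: with max_part=15, part_shift becomes
 * fls(15) = 4, max_part is re-truncated to (1 << 4) - 1 = 15, and
 * device i gets first_minor = i << 4, leaving minors i*16 + 1..15 for
 * its partitions.
 */
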
static void __exit nbd_cleanup(void)
{
        int i;

        nbd_dbg_close();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;

                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");