/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
	blk_status_t status;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

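/*
 * Mark a single connection dead: shut the socket down, drop any partially
 * sent request state, and (if @notify is set and the device hasn't been
 * disconnected) queue nbd_dead_link_work() to report the dead link for this
 * device index.
 */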
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

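/*
 * blk-mq timeout handler. With multiple connections a timed-out request is
 * requeued so the submit path can retry it on a live socket; the socket it
 * went out on is only marked dead if it has not already been replaced
 * (checked via the per-socket cookie). With a single connection the whole
 * device is shut down and the request fails with an I/O error.
 */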
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	}

	/* If we are waiting on our dead timer then we could get timeout
	 * callbacks for our request.  For this we just want to reset the timer
	 * and let the queue side take care of everything.
	 */
	if (!completion_done(&cmd->send_complete)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}

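/*
 * Note: socket allocations below use GFP_NOIO | __GFP_MEMALLOC and direct
 * reclaim is suppressed for the duration of the transfer; allocating with
 * reclaim enabled on this path could recurse back into block I/O on this
 * very device and deadlock.
 */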
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

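/*
 * A send can be interrupted by a signal (sock_xmit returns -ERESTARTSYS part
 * way through a request). The partially sent request is then parked in
 * nsock->pending with nsock->sent recording how many bytes already went out,
 * and the next call resumes from that offset instead of resending the header.
 */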
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 nbd_cmd_flags = 0;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

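/*
 * Receive one reply from the server and map it back to the originating
 * request: the 32-bit blk-mq unique tag was round-tripped through the NBD
 * handle, so blk_mq_unique_tag_to_hwq()/_to_tag() recover the request. For
 * reads the payload is then received directly into the request's bvecs.
 */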
/* An ERR_PTR return = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

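/*
 * Pick a live socket to retry a request on after its original connection
 * died. The last choice is cached in nsock->fallback_index so repeated
 * failures on the same socket don't rescan the whole array every time.
 * Returns -1 if the device is disconnected or no live socket remains.
 */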
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_interruptible_timeout(config->conn_wait,
					 atomic_read(&config->live_connections),
					 config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}

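/*
 * Route one request to a socket: take a config reference, fall back to a
 * live connection if the chosen one is dead (optionally waiting up to
 * dead_conn_timeout for a reconnect), then send under the per-socket
 * tx_lock. An -EAGAIN from nbd_send_cmd marks the socket dead and retries
 * on another connection.
 */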
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first.  We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed trying another connection\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail.  In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}

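/*
 * Attach a new socket fd to the device. On the legacy ioctl path only the
 * task that did the first NBD_SET_SOCK may add further sockets, and adding
 * to a netlink-bound device is refused; the netlink path skips the
 * ownership check.
 */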
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

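/*
 * Replace the first dead connection with a fresh socket supplied over
 * netlink. Bumping nsock->cookie lets the timeout handler tell requests
 * sent on the old socket apart from ones sent after the reconnect.
 */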
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);
	}
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

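/*
 * Drop a reference on the configuration. The last put tears the whole
 * config down under config_lock: the sysfs pid file, sockets, debugfs dir
 * and size/queue settings all go, and the device reference taken when the
 * config was created is released.
 */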
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(recv_workqueue, &args->work);
	}
	return error;
}

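/*
 * ioctl(NBD_DO_IT) wrapper around nbd_start_device(): it additionally sizes
 * the block device and then blocks the calling process until every receive
 * worker has exited, preserving the legacy behaviour where nbd-client stays
 * inside NBD_DO_IT for the lifetime of the device.
 */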
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	bd_set_size(bdev, config->bytesize);
	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}

static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

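/*
 * Allocate one nbd device: gendisk, blk-mq tag set and request queue, plus
 * an IDR slot (a specific index if requested, otherwise the first free
 * one). BLK_MQ_F_BLOCKING is set because ->queue_rq() sleeps: it takes the
 * per-socket tx_lock and calls sock_sendmsg().
 */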
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};

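/*
 * NBD_CMD_CONNECT handler: find (or create) a device, allocate a fresh
 * config, apply the size/timeout/flag attributes, attach every socket from
 * the NBD_ATTR_SOCKETS list and start the device. On success a config
 * reference is kept (NBD_HAS_CONFIG_REF) until disconnect.
 */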
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

1717static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1718{
1719 struct nbd_device *nbd;
1720 int index;
1721
1722 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1723 return -EPERM;
1724
1725 if (!info->attrs[NBD_ATTR_INDEX]) {
1726 printk(KERN_ERR "nbd: must specify an index to disconnect\n");
1727 return -EINVAL;
1728 }
1729 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1730 mutex_lock(&nbd_index_mutex);
1731 nbd = idr_find(&nbd_index_idr, index);
e46c7287 1732 if (!nbd) {
c6a4759e 1733 mutex_unlock(&nbd_index_mutex);
e46c7287
JB
1734 printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1735 index);
1736 return -EINVAL;
1737 }
c6a4759e
JB
1738 if (!refcount_inc_not_zero(&nbd->refs)) {
1739 mutex_unlock(&nbd_index_mutex);
1740 printk(KERN_ERR "nbd: device at index %d is going down\n",
1741 index);
1742 return -EINVAL;
1743 }
1744 mutex_unlock(&nbd_index_mutex);
1745 if (!refcount_inc_not_zero(&nbd->config_refs)) {
1746 nbd_put(nbd);
e46c7287 1747 return 0;
c6a4759e 1748 }
e46c7287
JB
1749 mutex_lock(&nbd->config_lock);
1750 nbd_disconnect(nbd);
1751 mutex_unlock(&nbd->config_lock);
1752 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1753 &nbd->config->runtime_flags))
1754 nbd_config_put(nbd);
1755 nbd_config_put(nbd);
c6a4759e 1756 nbd_put(nbd);
e46c7287
JB
1757 return 0;
1758}

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = -EINVAL;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
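
/*
 * Example (hedged): replacing a dead link from userspace reuses the connect
 * layout but with NBD_CMD_RECONFIGURE and the fd of a freshly negotiated
 * socket (new_fd below is illustrative; setup as in the connect sketch):
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_RECONFIGURE, 0);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);
 *	socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, new_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_sync(sk, msg);
 */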

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_reconfigure,
	},
	{
		.cmd = NBD_CMD_STATUS,
		.policy = nbd_attr_policy,
		.doit = nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.ops = nbd_connect_genl_ops,
	.n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr = NBD_ATTR_MAX,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
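
/*
 * Example (hedged): once the module is loaded, the registered family and
 * its multicast group can be inspected from userspace with the iproute2
 * genl tool:
 *
 *	genl ctrl list
 *
 * which should show an "nbd" entry carrying the command ops above.
 */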

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The reason we
	 * don't take a ref here is because we can't take a ref in the
	 * index == -1 case as we would need to put under the
	 * nbd_index_mutex, which could deadlock if we are configured to
	 * remove ourselves once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	/* Check the nest for failure so we don't dereference a NULL
	 * dev_list in nla_nest_end() below.
	 */
	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (!dev_list) {
		nlmsg_free(reply);
		ret = -EMSGSIZE;
		goto out;
	}
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
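
/*
 * For reference, the reply built above has this attribute layout (names
 * from <linux/nbd-netlink.h>):
 *
 *	NBD_ATTR_DEVICE_LIST
 *	    NBD_DEVICE_ITEM
 *	        NBD_DEVICE_INDEX     (u32)
 *	        NBD_DEVICE_CONNECTED (u8, 0 or 1)
 *	    NBD_DEVICE_ITEM
 *	        ...
 */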

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
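
/*
 * Example (hedged): a userspace daemon can watch for these notifications by
 * joining the multicast group; with libnl-genl-3:
 *
 *	int grp = genl_ctrl_resolve_grp(sk, NBD_GENL_FAMILY_NAME,
 *					NBD_GENL_MCAST_GROUP_NAME);
 *	nl_socket_add_membership(sk, grp);
 *
 * after which each NBD_CMD_LINK_DEAD message carries the NBD_ATTR_INDEX of
 * the device that lost a connection.
 */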

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);

	nbd_mcast_index(args->index);
	kfree(args);
}

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space, so that users can know the maximum number
		 * of partitions the kernel can manage per device.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
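	/*
	 * Worked example: max_part=16 gives part_shift = fls(16) = 5, so
	 * max_part is rounded up to (1 << 5) - 1 = 31 usable partitions
	 * and each device consumes 32 minor numbers.
	 */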

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
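
/*
 * Example (hedged): a typical module load using the parameters above; the
 * values shown are arbitrary.
 *
 *	modprobe nbd nbds_max=4 max_part=8
 */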