Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Network block device - make block devices work over TCP | |
3 | * | |
4 | * Note that you can not swap over this thing, yet. Seems to work but | |
5 | * deadlocks sometimes - you can not swap over TCP in general. | |
6 | * | |
a2531293 | 7 | * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> |
1da177e4 LT |
8 | * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> |
9 | * | |
dbf492d6 | 10 | * This file is released under GPLv2 or later. |
1da177e4 | 11 | * |
dbf492d6 | 12 | * (part of code stolen from loop.c) |
1da177e4 LT |
13 | */ |
14 | ||
15 | #include <linux/major.h> | |
16 | ||
17 | #include <linux/blkdev.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/sched.h> | |
21 | #include <linux/fs.h> | |
22 | #include <linux/bio.h> | |
23 | #include <linux/stat.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/file.h> | |
26 | #include <linux/ioctl.h> | |
2a48fc0a | 27 | #include <linux/mutex.h> |
4b2f0260 HX |
28 | #include <linux/compiler.h> |
29 | #include <linux/err.h> | |
30 | #include <linux/kernel.h> | |
5a0e3ad6 | 31 | #include <linux/slab.h> |
1da177e4 | 32 | #include <net/sock.h> |
91cf45f0 | 33 | #include <linux/net.h> |
48cf6061 | 34 | #include <linux/kthread.h> |
b9c495bb | 35 | #include <linux/types.h> |
30d53d9c | 36 | #include <linux/debugfs.h> |
fd8383fd | 37 | #include <linux/blk-mq.h> |
1da177e4 | 38 | |
7c0f6ba6 | 39 | #include <linux/uaccess.h> |
1da177e4 LT |
40 | #include <asm/types.h> |
41 | ||
42 | #include <linux/nbd.h> | |
43 | ||
b0d9111a JB |
44 | static DEFINE_IDR(nbd_index_idr); |
45 | static DEFINE_MUTEX(nbd_index_mutex); | |
46 | ||
9561a7ad JB |
47 | struct nbd_sock { |
48 | struct socket *sock; | |
49 | struct mutex tx_lock; | |
9dd5d3ab JB |
50 | struct request *pending; |
51 | int sent; | |
f3733247 JB |
52 | bool dead; |
53 | int fallback_index; | |
9561a7ad JB |
54 | }; |
55 | ||
5ea8d108 JB |
56 | struct recv_thread_args { |
57 | struct work_struct work; | |
58 | struct nbd_device *nbd; | |
59 | int index; | |
60 | }; | |
61 | ||
9b4a6ba9 JB |
62 | #define NBD_TIMEDOUT 0 |
63 | #define NBD_DISCONNECT_REQUESTED 1 | |
9561a7ad | 64 | #define NBD_DISCONNECTED 2 |
5ea8d108 | 65 | #define NBD_HAS_PID_FILE 3 |
9b4a6ba9 | 66 | |
5ea8d108 | 67 | struct nbd_config { |
22d109c1 | 68 | u32 flags; |
9b4a6ba9 | 69 | unsigned long runtime_flags; |
13e71d69 | 70 | |
5ea8d108 | 71 | struct nbd_sock **socks; |
9561a7ad | 72 | int num_connections; |
5ea8d108 | 73 | |
9561a7ad JB |
74 | atomic_t recv_threads; |
75 | wait_queue_head_t recv_wq; | |
ef77b515 | 76 | loff_t blksize; |
b9c495bb | 77 | loff_t bytesize; |
30d53d9c MP |
78 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
79 | struct dentry *dbg_dir; | |
80 | #endif | |
13e71d69 MP |
81 | }; |
82 | ||
5ea8d108 JB |
83 | struct nbd_device { |
84 | struct blk_mq_tag_set tag_set; | |
85 | ||
86 | refcount_t config_refs; | |
87 | struct nbd_config *config; | |
88 | struct mutex config_lock; | |
89 | struct gendisk *disk; | |
90 | ||
91 | struct task_struct *task_recv; | |
92 | struct task_struct *task_setup; | |
93 | }; | |
94 | ||
fd8383fd JB |
95 | struct nbd_cmd { |
96 | struct nbd_device *nbd; | |
f3733247 | 97 | int index; |
9561a7ad | 98 | struct completion send_complete; |
fd8383fd JB |
99 | }; |
100 | ||
30d53d9c MP |
101 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
102 | static struct dentry *nbd_dbg_dir; | |
103 | #endif | |
104 | ||
105 | #define nbd_name(nbd) ((nbd)->disk->disk_name) | |
106 | ||
f4507164 | 107 | #define NBD_MAGIC 0x68797548 |
1da177e4 | 108 | |
9c7a4169 | 109 | static unsigned int nbds_max = 16; |
d71a6d73 | 110 | static int max_part; |
124d6db0 | 111 | static struct workqueue_struct *recv_workqueue; |
b0d9111a | 112 | static int part_shift; |
1da177e4 | 113 | |
9442b739 JB |
114 | static int nbd_dev_dbg_init(struct nbd_device *nbd); |
115 | static void nbd_dev_dbg_close(struct nbd_device *nbd); | |
5ea8d108 | 116 | static void nbd_config_put(struct nbd_device *nbd); |
9442b739 | 117 | |
d18509f5 | 118 | static inline struct device *nbd_to_dev(struct nbd_device *nbd) |
1da177e4 | 119 | { |
d18509f5 | 120 | return disk_to_dev(nbd->disk); |
1da177e4 LT |
121 | } |
122 | ||
123 | static const char *nbdcmd_to_ascii(int cmd) | |
124 | { | |
125 | switch (cmd) { | |
126 | case NBD_CMD_READ: return "read"; | |
127 | case NBD_CMD_WRITE: return "write"; | |
128 | case NBD_CMD_DISC: return "disconnect"; | |
75f187ab | 129 | case NBD_CMD_FLUSH: return "flush"; |
a336d298 | 130 | case NBD_CMD_TRIM: return "trim/discard"; |
1da177e4 LT |
131 | } |
132 | return "invalid"; | |
133 | } | |
1da177e4 | 134 | |
5ea8d108 JB |
135 | static ssize_t pid_show(struct device *dev, |
136 | struct device_attribute *attr, char *buf) | |
137 | { | |
138 | struct gendisk *disk = dev_to_disk(dev); | |
139 | struct nbd_device *nbd = (struct nbd_device *)disk->private_data; | |
140 | ||
141 | return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); | |
142 | } | |
143 | ||
144 | static struct device_attribute pid_attr = { | |
145 | .attr = { .name = "pid", .mode = S_IRUGO}, | |
146 | .show = pid_show, | |
147 | }; | |
148 | ||
f3733247 JB |
149 | static void nbd_mark_nsock_dead(struct nbd_sock *nsock) |
150 | { | |
151 | if (!nsock->dead) | |
152 | kernel_sock_shutdown(nsock->sock, SHUT_RDWR); | |
153 | nsock->dead = true; | |
154 | nsock->pending = NULL; | |
155 | nsock->sent = 0; | |
156 | } | |
157 | ||
29eaadc0 | 158 | static void nbd_size_clear(struct nbd_device *nbd) |
37091fdd | 159 | { |
5ea8d108 | 160 | if (nbd->config->bytesize) { |
5ea8d108 JB |
161 | set_capacity(nbd->disk, 0); |
162 | kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); | |
163 | } | |
37091fdd MP |
164 | } |
165 | ||
29eaadc0 | 166 | static void nbd_size_update(struct nbd_device *nbd) |
37091fdd | 167 | { |
5ea8d108 JB |
168 | struct nbd_config *config = nbd->config; |
169 | blk_queue_logical_block_size(nbd->disk->queue, config->blksize); | |
170 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); | |
5ea8d108 | 171 | set_capacity(nbd->disk, config->bytesize >> 9); |
37091fdd MP |
172 | kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); |
173 | } | |
174 | ||
29eaadc0 JB |
175 | static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, |
176 | loff_t nr_blocks) | |
37091fdd | 177 | { |
5ea8d108 JB |
178 | struct nbd_config *config = nbd->config; |
179 | config->blksize = blocksize; | |
180 | config->bytesize = blocksize * nr_blocks; | |
29eaadc0 | 181 | nbd_size_update(nbd); |
37091fdd MP |
182 | } |
183 | ||
fd8383fd | 184 | static void nbd_end_request(struct nbd_cmd *cmd) |
1da177e4 | 185 | { |
fd8383fd JB |
186 | struct nbd_device *nbd = cmd->nbd; |
187 | struct request *req = blk_mq_rq_from_pdu(cmd); | |
097c94a4 | 188 | int error = req->errors ? -EIO : 0; |
1da177e4 | 189 | |
fd8383fd | 190 | dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd, |
d18509f5 | 191 | error ? "failed" : "done"); |
1da177e4 | 192 | |
fd8383fd | 193 | blk_mq_complete_request(req, error); |
1da177e4 LT |
194 | } |
195 | ||
e018e757 MP |
196 | /* |
197 | * Forcibly shut down the socket, causing all listeners to error |
198 | */ | |
36e47bee | 199 | static void sock_shutdown(struct nbd_device *nbd) |
7fdfd406 | 200 | { |
5ea8d108 | 201 | struct nbd_config *config = nbd->config; |
9561a7ad | 202 | int i; |
23272a67 | 203 | |
5ea8d108 | 204 | if (config->num_connections == 0) |
9561a7ad | 205 | return; |
5ea8d108 | 206 | if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags)) |
260bbce4 | 207 | return; |
23272a67 | 208 | |
5ea8d108 JB |
209 | for (i = 0; i < config->num_connections; i++) { |
210 | struct nbd_sock *nsock = config->socks[i]; | |
9561a7ad JB |
211 | mutex_lock(&nsock->tx_lock); |
212 | kernel_sock_shutdown(nsock->sock, SHUT_RDWR); | |
5ea8d108 | 213 | nbd_mark_nsock_dead(nsock); |
9561a7ad JB |
214 | mutex_unlock(&nsock->tx_lock); |
215 | } | |
216 | dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); | |
7fdfd406 PC |
217 | } |
218 | ||
0eadf37a JB |
219 | static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, |
220 | bool reserved) | |
7fdfd406 | 221 | { |
0eadf37a JB |
222 | struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); |
223 | struct nbd_device *nbd = cmd->nbd; | |
5ea8d108 JB |
224 | struct nbd_config *config; |
225 | ||
226 | if (!refcount_inc_not_zero(&nbd->config_refs)) { | |
227 | req->errors = -EIO; | |
228 | return BLK_EH_HANDLED; | |
229 | } | |
230 | ||
231 | config = nbd->config; | |
dcc909d9 | 232 | |
5ea8d108 | 233 | if (config->num_connections > 1) { |
f3733247 JB |
234 | dev_err_ratelimited(nbd_to_dev(nbd), |
235 | "Connection timed out, retrying\n"); | |
f3733247 JB |
236 | /* |
237 | * Hooray we have more connections, requeue this IO, the submit | |
238 | * path will put it on a real connection. | |
239 | */ | |
5ea8d108 JB |
240 | if (config->socks && config->num_connections > 1) { |
241 | if (cmd->index < config->num_connections) { | |
f3733247 | 242 | struct nbd_sock *nsock = |
5ea8d108 | 243 | config->socks[cmd->index]; |
f3733247 JB |
244 | mutex_lock(&nsock->tx_lock); |
245 | nbd_mark_nsock_dead(nsock); | |
246 | mutex_unlock(&nsock->tx_lock); | |
247 | } | |
f3733247 | 248 | blk_mq_requeue_request(req, true); |
5ea8d108 | 249 | nbd_config_put(nbd); |
f3733247 JB |
250 | return BLK_EH_NOT_HANDLED; |
251 | } | |
f3733247 JB |
252 | } else { |
253 | dev_err_ratelimited(nbd_to_dev(nbd), | |
254 | "Connection timed out\n"); | |
255 | } | |
5ea8d108 | 256 | set_bit(NBD_TIMEDOUT, &config->runtime_flags); |
c103b4da | 257 | req->errors = -EIO; |
9561a7ad | 258 | sock_shutdown(nbd); |
5ea8d108 JB |
259 | nbd_config_put(nbd); |
260 | ||
0eadf37a | 261 | return BLK_EH_HANDLED; |
7fdfd406 PC |
262 | } |
263 | ||
1da177e4 LT |
264 | /* |
265 | * Send or receive packet. | |
266 | */ | |
c9f2b6ae | 267 | static int sock_xmit(struct nbd_device *nbd, int index, int send, |
9dd5d3ab | 268 | struct iov_iter *iter, int msg_flags, int *sent) |
1da177e4 | 269 | { |
5ea8d108 JB |
270 | struct nbd_config *config = nbd->config; |
271 | struct socket *sock = config->socks[index]->sock; | |
1da177e4 LT |
272 | int result; |
273 | struct msghdr msg; | |
7f338fe4 | 274 | unsigned long pflags = current->flags; |
1da177e4 | 275 | |
ffc41cf8 | 276 | if (unlikely(!sock)) { |
a897b666 | 277 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
7f1b90f9 WC |
278 | "Attempted %s on closed socket in sock_xmit\n", |
279 | (send ? "send" : "recv")); | |
ffc41cf8 MS |
280 | return -EINVAL; |
281 | } | |
282 | ||
c9f2b6ae | 283 | msg.msg_iter = *iter; |
c1696cab | 284 | |
7f338fe4 | 285 | current->flags |= PF_MEMALLOC; |
1da177e4 | 286 | do { |
7f338fe4 | 287 | sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; |
1da177e4 LT |
288 | msg.msg_name = NULL; |
289 | msg.msg_namelen = 0; | |
290 | msg.msg_control = NULL; | |
291 | msg.msg_controllen = 0; | |
1da177e4 LT |
292 | msg.msg_flags = msg_flags | MSG_NOSIGNAL; |
293 | ||
7e2893a1 | 294 | if (send) |
c1696cab | 295 | result = sock_sendmsg(sock, &msg); |
7e2893a1 | 296 | else |
c1696cab | 297 | result = sock_recvmsg(sock, &msg, msg.msg_flags); |
1da177e4 | 298 | |
1da177e4 LT |
299 | if (result <= 0) { |
300 | if (result == 0) | |
301 | result = -EPIPE; /* short read */ | |
302 | break; | |
303 | } | |
9dd5d3ab JB |
304 | if (sent) |
305 | *sent += result; | |
c1696cab | 306 | } while (msg_data_left(&msg)); |
1da177e4 | 307 | |
7f338fe4 | 308 | tsk_restore_flags(current, pflags, PF_MEMALLOC); |
1da177e4 LT |
309 | |
310 | return result; | |
311 | } | |
312 | ||
7fdfd406 | 313 | /* always call with the tx_lock held */ |
9561a7ad | 314 | static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) |
1da177e4 | 315 | { |
fd8383fd | 316 | struct request *req = blk_mq_rq_from_pdu(cmd); |
5ea8d108 JB |
317 | struct nbd_config *config = nbd->config; |
318 | struct nbd_sock *nsock = config->socks[index]; | |
d61b7f97 | 319 | int result; |
c9f2b6ae AV |
320 | struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; |
321 | struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; | |
322 | struct iov_iter from; | |
1011c1b9 | 323 | unsigned long size = blk_rq_bytes(req); |
429a787b | 324 | struct bio *bio; |
9dc6c806 | 325 | u32 type; |
9561a7ad | 326 | u32 tag = blk_mq_unique_tag(req); |
9dd5d3ab | 327 | int sent = nsock->sent, skip = 0; |
9dc6c806 | 328 | |
c9f2b6ae AV |
329 | iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); |
330 | ||
aebf526b CH |
331 | switch (req_op(req)) { |
332 | case REQ_OP_DISCARD: | |
9dc6c806 | 333 | type = NBD_CMD_TRIM; |
aebf526b CH |
334 | break; |
335 | case REQ_OP_FLUSH: | |
9dc6c806 | 336 | type = NBD_CMD_FLUSH; |
aebf526b CH |
337 | break; |
338 | case REQ_OP_WRITE: | |
9dc6c806 | 339 | type = NBD_CMD_WRITE; |
aebf526b CH |
340 | break; |
341 | case REQ_OP_READ: | |
9dc6c806 | 342 | type = NBD_CMD_READ; |
aebf526b CH |
343 | break; |
344 | default: | |
345 | return -EIO; | |
346 | } | |
1da177e4 | 347 | |
09fc54cc | 348 | if (rq_data_dir(req) == WRITE && |
5ea8d108 | 349 | (config->flags & NBD_FLAG_READ_ONLY)) { |
09fc54cc CH |
350 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
351 | "Write on read-only\n"); | |
352 | return -EIO; | |
353 | } | |
354 | ||
9dd5d3ab JB |
355 | /* We did a partial send previously, and we at least sent the whole |
356 | * request struct, so just go and send the rest of the pages in the | |
357 | * request. | |
358 | */ | |
359 | if (sent) { | |
360 | if (sent >= sizeof(request)) { | |
361 | skip = sent - sizeof(request); | |
362 | goto send_pages; | |
363 | } | |
364 | iov_iter_advance(&from, sent); | |
365 | } | |
f3733247 | 366 | cmd->index = index; |
9dc6c806 | 367 | request.type = htonl(type); |
9561a7ad | 368 | if (type != NBD_CMD_FLUSH) { |
75f187ab AB |
369 | request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); |
370 | request.len = htonl(size); | |
371 | } | |
9561a7ad | 372 | memcpy(request.handle, &tag, sizeof(tag)); |
1da177e4 | 373 | |
d18509f5 | 374 | dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", |
fd8383fd | 375 | cmd, nbdcmd_to_ascii(type), |
d18509f5 | 376 | (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); |
c9f2b6ae | 377 | result = sock_xmit(nbd, index, 1, &from, |
9dd5d3ab | 378 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); |
1da177e4 | 379 | if (result <= 0) { |
9dd5d3ab JB |
380 | if (result == -ERESTARTSYS) { |
381 | /* If we haven't sent anything we can just return BUSY, |
382 | * however if we have sent something we need to make | |
383 | * sure we only allow this req to be sent until we are | |
384 | * completely done. | |
385 | */ | |
386 | if (sent) { | |
387 | nsock->pending = req; | |
388 | nsock->sent = sent; | |
389 | } | |
390 | return BLK_MQ_RQ_QUEUE_BUSY; | |
391 | } | |
a897b666 | 392 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
7f1b90f9 | 393 | "Send control failed (result %d)\n", result); |
f3733247 | 394 | return -EAGAIN; |
1da177e4 | 395 | } |
9dd5d3ab | 396 | send_pages: |
429a787b | 397 | if (type != NBD_CMD_WRITE) |
9dd5d3ab | 398 | goto out; |
429a787b | 399 | |
429a787b JA |
400 | bio = req->bio; |
401 | while (bio) { | |
402 | struct bio *next = bio->bi_next; | |
403 | struct bvec_iter iter; | |
7988613b | 404 | struct bio_vec bvec; |
429a787b JA |
405 | |
406 | bio_for_each_segment(bvec, bio, iter) { | |
407 | bool is_last = !next && bio_iter_last(bvec, iter); | |
d61b7f97 | 408 | int flags = is_last ? 0 : MSG_MORE; |
429a787b | 409 | |
d18509f5 | 410 | dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", |
fd8383fd | 411 | cmd, bvec.bv_len); |
c9f2b6ae AV |
412 | iov_iter_bvec(&from, ITER_BVEC | WRITE, |
413 | &bvec, 1, bvec.bv_len); | |
9dd5d3ab JB |
414 | if (skip) { |
415 | if (skip >= iov_iter_count(&from)) { | |
416 | skip -= iov_iter_count(&from); | |
417 | continue; | |
418 | } | |
419 | iov_iter_advance(&from, skip); | |
420 | skip = 0; | |
421 | } | |
422 | result = sock_xmit(nbd, index, 1, &from, flags, &sent); | |
6c92e699 | 423 | if (result <= 0) { |
9dd5d3ab JB |
424 | if (result == -ERESTARTSYS) { |
425 | /* We've already sent the header, we | |
426 | * have no choice but to set pending and | |
427 | * return BUSY. | |
428 | */ | |
429 | nsock->pending = req; | |
430 | nsock->sent = sent; | |
431 | return BLK_MQ_RQ_QUEUE_BUSY; | |
432 | } | |
f4507164 | 433 | dev_err(disk_to_dev(nbd->disk), |
7f1b90f9 WC |
434 | "Send data failed (result %d)\n", |
435 | result); | |
f3733247 | 436 | return -EAGAIN; |
6c92e699 | 437 | } |
429a787b JA |
438 | /* |
439 | * The completion might already have come in, | |
440 | * so break for the last one instead of letting | |
441 | * the iterator do it. This prevents use-after-free | |
442 | * of the bio. | |
443 | */ | |
444 | if (is_last) | |
445 | break; | |
1da177e4 | 446 | } |
429a787b | 447 | bio = next; |
1da177e4 | 448 | } |
9dd5d3ab JB |
449 | out: |
450 | nsock->pending = NULL; | |
451 | nsock->sent = 0; | |
1da177e4 | 452 | return 0; |
1da177e4 LT |
453 | } |
454 | ||
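Editor's note: for reference, the header that nbd_send_cmd() serializes above is the classic fixed-size NBD request; the BUILD_BUG_ON() in nbd_init() further down checks that struct nbd_request is exactly 28 bytes. A sketch of the wire layout (field names follow struct nbd_request from <linux/nbd.h>; the reply format is included for context):

/*
 * Request header sent by nbd_send_cmd() (28 bytes, multi-byte fields
 * big-endian on the wire):
 *
 *   __be32 magic;      NBD_REQUEST_MAGIC (0x25609513)
 *   __be32 type;       NBD_CMD_READ / WRITE / DISC / FLUSH / TRIM
 *   char   handle[8];  opaque cookie; this driver stores the blk-mq
 *                      unique tag here
 *   __be64 from;       byte offset, i.e. blk_rq_pos(req) << 9
 *   __be32 len;        payload length in bytes, blk_rq_bytes(req)
 *
 * The server answers with a 16-byte reply (magic, error, handle) and
 * echoes handle[] unchanged, which is how nbd_read_stat() below maps a
 * reply back to its request.
 */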
5ea8d108 | 455 | static int nbd_disconnected(struct nbd_config *config) |
f3733247 | 456 | { |
5ea8d108 JB |
457 | return test_bit(NBD_DISCONNECTED, &config->runtime_flags) || |
458 | test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); | |
f3733247 JB |
459 | } |
460 | ||
1da177e4 | 461 | /* NULL returned = something went wrong, inform userspace */ |
9561a7ad | 462 | static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) |
1da177e4 | 463 | { |
5ea8d108 | 464 | struct nbd_config *config = nbd->config; |
1da177e4 LT |
465 | int result; |
466 | struct nbd_reply reply; | |
fd8383fd JB |
467 | struct nbd_cmd *cmd; |
468 | struct request *req = NULL; | |
469 | u16 hwq; | |
9561a7ad | 470 | u32 tag; |
c9f2b6ae AV |
471 | struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; |
472 | struct iov_iter to; | |
1da177e4 LT |
473 | |
474 | reply.magic = 0; | |
c9f2b6ae | 475 | iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); |
9dd5d3ab | 476 | result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); |
1da177e4 | 477 | if (result <= 0) { |
5ea8d108 | 478 | if (!nbd_disconnected(config)) |
9561a7ad JB |
479 | dev_err(disk_to_dev(nbd->disk), |
480 | "Receive control failed (result %d)\n", result); | |
19391830 | 481 | return ERR_PTR(result); |
1da177e4 | 482 | } |
e4b57e08 MF |
483 | |
484 | if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { | |
f4507164 | 485 | dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", |
e4b57e08 | 486 | (unsigned long)ntohl(reply.magic)); |
19391830 | 487 | return ERR_PTR(-EPROTO); |
e4b57e08 MF |
488 | } |
489 | ||
9561a7ad | 490 | memcpy(&tag, reply.handle, sizeof(u32)); |
4b2f0260 | 491 | |
fd8383fd JB |
492 | hwq = blk_mq_unique_tag_to_hwq(tag); |
493 | if (hwq < nbd->tag_set.nr_hw_queues) | |
494 | req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], | |
495 | blk_mq_unique_tag_to_tag(tag)); | |
496 | if (!req || !blk_mq_request_started(req)) { | |
497 | dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", | |
498 | tag, req); | |
499 | return ERR_PTR(-ENOENT); | |
1da177e4 | 500 | } |
fd8383fd | 501 | cmd = blk_mq_rq_to_pdu(req); |
1da177e4 | 502 | if (ntohl(reply.error)) { |
f4507164 | 503 | dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", |
7f1b90f9 | 504 | ntohl(reply.error)); |
c103b4da | 505 | req->errors = -EIO; |
fd8383fd | 506 | return cmd; |
1da177e4 LT |
507 | } |
508 | ||
fd8383fd | 509 | dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); |
9dc6c806 | 510 | if (rq_data_dir(req) != WRITE) { |
5705f702 | 511 | struct req_iterator iter; |
7988613b | 512 | struct bio_vec bvec; |
5705f702 N |
513 | |
514 | rq_for_each_segment(bvec, req, iter) { | |
c9f2b6ae AV |
515 | iov_iter_bvec(&to, ITER_BVEC | READ, |
516 | &bvec, 1, bvec.bv_len); | |
9dd5d3ab | 517 | result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); |
6c92e699 | 518 | if (result <= 0) { |
f4507164 | 519 | dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", |
7f1b90f9 | 520 | result); |
f3733247 JB |
521 | /* |
522 | * If we've disconnected or we only have 1 | |
523 | * connection then we need to make sure we | |
524 | * complete this request, otherwise error out | |
525 | * and let the timeout stuff handle resubmitting | |
526 | * this request onto another connection. | |
527 | */ | |
5ea8d108 JB |
528 | if (nbd_disconnected(config) || |
529 | config->num_connections <= 1) { | |
f3733247 JB |
530 | req->errors = -EIO; |
531 | return cmd; | |
532 | } | |
533 | return ERR_PTR(-EIO); | |
6c92e699 | 534 | } |
d18509f5 | 535 | dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", |
fd8383fd | 536 | cmd, bvec.bv_len); |
1da177e4 | 537 | } |
9561a7ad JB |
538 | } else { |
539 | /* See the comment in nbd_queue_rq. */ | |
540 | wait_for_completion(&cmd->send_complete); | |
1da177e4 | 541 | } |
fd8383fd | 542 | return cmd; |
1da177e4 LT |
543 | } |
544 | ||
9561a7ad | 545 | static void recv_work(struct work_struct *work) |
1da177e4 | 546 | { |
9561a7ad JB |
547 | struct recv_thread_args *args = container_of(work, |
548 | struct recv_thread_args, | |
549 | work); | |
550 | struct nbd_device *nbd = args->nbd; | |
5ea8d108 | 551 | struct nbd_config *config = nbd->config; |
fd8383fd | 552 | struct nbd_cmd *cmd; |
9561a7ad | 553 | int ret = 0; |
1da177e4 | 554 | |
19391830 | 555 | while (1) { |
9561a7ad | 556 | cmd = nbd_read_stat(nbd, args->index); |
fd8383fd | 557 | if (IS_ERR(cmd)) { |
5ea8d108 | 558 | struct nbd_sock *nsock = config->socks[args->index]; |
f3733247 JB |
559 | |
560 | mutex_lock(&nsock->tx_lock); | |
561 | nbd_mark_nsock_dead(nsock); | |
562 | mutex_unlock(&nsock->tx_lock); | |
fd8383fd | 563 | ret = PTR_ERR(cmd); |
19391830 MP |
564 | break; |
565 | } | |
566 | ||
fd8383fd | 567 | nbd_end_request(cmd); |
19391830 | 568 | } |
5ea8d108 JB |
569 | atomic_dec(&config->recv_threads); |
570 | wake_up(&config->recv_wq); | |
571 | nbd_config_put(nbd); | |
572 | kfree(args); | |
1da177e4 LT |
573 | } |
574 | ||
fd8383fd | 575 | static void nbd_clear_req(struct request *req, void *data, bool reserved) |
1da177e4 | 576 | { |
fd8383fd | 577 | struct nbd_cmd *cmd; |
1da177e4 | 578 | |
fd8383fd JB |
579 | if (!blk_mq_request_started(req)) |
580 | return; | |
581 | cmd = blk_mq_rq_to_pdu(req); | |
c103b4da | 582 | req->errors = -EIO; |
fd8383fd JB |
583 | nbd_end_request(cmd); |
584 | } | |
585 | ||
586 | static void nbd_clear_que(struct nbd_device *nbd) | |
587 | { | |
fd8383fd | 588 | blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); |
e78273c8 | 589 | dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); |
1da177e4 LT |
590 | } |
591 | ||
f3733247 JB |
592 | static int find_fallback(struct nbd_device *nbd, int index) |
593 | { | |
5ea8d108 | 594 | struct nbd_config *config = nbd->config; |
f3733247 | 595 | int new_index = -1; |
5ea8d108 | 596 | struct nbd_sock *nsock = config->socks[index]; |
f3733247 JB |
597 | int fallback = nsock->fallback_index; |
598 | ||
5ea8d108 | 599 | if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) |
f3733247 JB |
600 | return new_index; |
601 | ||
5ea8d108 | 602 | if (config->num_connections <= 1) { |
f3733247 JB |
603 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
604 | "Attempted send on invalid socket\n"); | |
605 | return new_index; | |
606 | } | |
607 | ||
5ea8d108 JB |
608 | if (fallback >= 0 && fallback < config->num_connections && |
609 | !config->socks[fallback]->dead) | |
f3733247 JB |
610 | return fallback; |
611 | ||
612 | if (nsock->fallback_index < 0 || | |
5ea8d108 JB |
613 | nsock->fallback_index >= config->num_connections || |
614 | config->socks[nsock->fallback_index]->dead) { | |
f3733247 | 615 | int i; |
5ea8d108 | 616 | for (i = 0; i < config->num_connections; i++) { |
f3733247 JB |
617 | if (i == index) |
618 | continue; | |
5ea8d108 | 619 | if (!config->socks[i]->dead) { |
f3733247 JB |
620 | new_index = i; |
621 | break; | |
622 | } | |
623 | } | |
624 | nsock->fallback_index = new_index; | |
625 | if (new_index < 0) { | |
626 | dev_err_ratelimited(disk_to_dev(nbd->disk), | |
627 | "Dead connection, failed to find a fallback\n"); | |
628 | return new_index; | |
629 | } | |
630 | } | |
631 | new_index = nsock->fallback_index; | |
632 | return new_index; | |
633 | } | |
7fdfd406 | 634 | |
9dd5d3ab | 635 | static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) |
48cf6061 | 636 | { |
fd8383fd JB |
637 | struct request *req = blk_mq_rq_from_pdu(cmd); |
638 | struct nbd_device *nbd = cmd->nbd; | |
5ea8d108 | 639 | struct nbd_config *config; |
9561a7ad | 640 | struct nbd_sock *nsock; |
9dd5d3ab | 641 | int ret; |
fd8383fd | 642 | |
5ea8d108 JB |
643 | if (!refcount_inc_not_zero(&nbd->config_refs)) { |
644 | dev_err_ratelimited(disk_to_dev(nbd->disk), | |
645 | "Socks array is empty\n"); | |
646 | return -EINVAL; | |
647 | } | |
648 | config = nbd->config; | |
649 | ||
650 | if (index >= config->num_connections) { | |
a897b666 JB |
651 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
652 | "Attempted send on invalid socket\n"); | |
5ea8d108 | 653 | nbd_config_put(nbd); |
9dd5d3ab | 654 | return -EINVAL; |
9561a7ad | 655 | } |
48cf6061 | 656 | req->errors = 0; |
f3733247 | 657 | again: |
5ea8d108 | 658 | nsock = config->socks[index]; |
9561a7ad | 659 | mutex_lock(&nsock->tx_lock); |
f3733247 JB |
660 | if (nsock->dead) { |
661 | index = find_fallback(nbd, index); | |
5ea8d108 JB |
662 | if (index < 0) { |
663 | ret = -EIO; | |
664 | goto out; | |
665 | } | |
9561a7ad | 666 | mutex_unlock(&nsock->tx_lock); |
f3733247 | 667 | goto again; |
48cf6061 LV |
668 | } |
669 | ||
9dd5d3ab JB |
670 | /* Handle the case that we have a pending request that was partially |
671 | * transmitted that _has_ to be serviced first. We need to call requeue | |
672 | * here so that it gets put _after_ the request that is already on the | |
673 | * dispatch list. | |
674 | */ | |
675 | if (unlikely(nsock->pending && nsock->pending != req)) { | |
676 | blk_mq_requeue_request(req, true); | |
677 | ret = 0; | |
678 | goto out; | |
48cf6061 | 679 | } |
f3733247 JB |
680 | /* |
681 | * Some failures are related to the link going down, so anything that | |
682 | * returns EAGAIN can be retried on a different socket. | |
683 | */ | |
9dd5d3ab | 684 | ret = nbd_send_cmd(nbd, cmd, index); |
f3733247 JB |
685 | if (ret == -EAGAIN) { |
686 | dev_err_ratelimited(disk_to_dev(nbd->disk), | |
687 | "Request send failed trying another connection\n"); | |
688 | nbd_mark_nsock_dead(nsock); | |
689 | mutex_unlock(&nsock->tx_lock); | |
690 | goto again; | |
691 | } | |
9dd5d3ab | 692 | out: |
9561a7ad | 693 | mutex_unlock(&nsock->tx_lock); |
5ea8d108 | 694 | nbd_config_put(nbd); |
9dd5d3ab | 695 | return ret; |
48cf6061 LV |
696 | } |
697 | ||
fd8383fd JB |
698 | static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, |
699 | const struct blk_mq_queue_data *bd) | |
1da177e4 | 700 | { |
fd8383fd | 701 | struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); |
9dd5d3ab | 702 | int ret; |
1da177e4 | 703 | |
9561a7ad JB |
704 | /* |
705 | * Since we look at the bio's to send the request over the network we | |
706 | * need to make sure the completion work doesn't mark this request done | |
707 | * before we are done doing our send. This keeps us from dereferencing | |
708 | * freed data if we have particularly fast completions (ie we get the | |
709 | * completion before we exit sock_xmit on the last bvec) or in the case | |
710 | * that the server is misbehaving (or there was an error) before we're | |
711 | * done sending everything over the wire. | |
712 | */ | |
713 | init_completion(&cmd->send_complete); | |
fd8383fd | 714 | blk_mq_start_request(bd->rq); |
9dd5d3ab JB |
715 | |
716 | /* We can be called directly from the user space process, which means we | |
717 | * could possibly have signals pending so our sendmsg will fail. In | |
718 | * this case we need to return that we are busy, otherwise error out as | |
719 | * appropriate. | |
720 | */ | |
721 | ret = nbd_handle_cmd(cmd, hctx->queue_num); | |
722 | if (ret < 0) | |
723 | ret = BLK_MQ_RQ_QUEUE_ERROR; | |
724 | if (!ret) | |
725 | ret = BLK_MQ_RQ_QUEUE_OK; | |
9561a7ad JB |
726 | complete(&cmd->send_complete); |
727 | ||
9dd5d3ab | 728 | return ret; |
1da177e4 LT |
729 | } |
730 | ||
29eaadc0 | 731 | static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg) |
23272a67 | 732 | { |
5ea8d108 | 733 | struct nbd_config *config = nbd->config; |
9442b739 | 734 | struct socket *sock; |
9561a7ad JB |
735 | struct nbd_sock **socks; |
736 | struct nbd_sock *nsock; | |
9442b739 JB |
737 | int err; |
738 | ||
739 | sock = sockfd_lookup(arg, &err); | |
740 | if (!sock) | |
741 | return err; | |
23272a67 | 742 | |
9561a7ad JB |
743 | if (!nbd->task_setup) |
744 | nbd->task_setup = current; | |
745 | if (nbd->task_setup != current) { | |
746 | dev_err(disk_to_dev(nbd->disk), | |
747 | "Device being setup by another task"); | |
9b1355d5 | 748 | sockfd_put(sock); |
9561a7ad | 749 | return -EINVAL; |
23272a67 MP |
750 | } |
751 | ||
5ea8d108 | 752 | socks = krealloc(config->socks, (config->num_connections + 1) * |
9561a7ad | 753 | sizeof(struct nbd_sock *), GFP_KERNEL); |
9b1355d5 JB |
754 | if (!socks) { |
755 | sockfd_put(sock); | |
9561a7ad | 756 | return -ENOMEM; |
9b1355d5 | 757 | } |
9561a7ad | 758 | nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); |
9b1355d5 JB |
759 | if (!nsock) { |
760 | sockfd_put(sock); | |
9561a7ad | 761 | return -ENOMEM; |
9b1355d5 | 762 | } |
9561a7ad | 763 | |
5ea8d108 | 764 | config->socks = socks; |
23272a67 | 765 | |
f3733247 JB |
766 | nsock->fallback_index = -1; |
767 | nsock->dead = false; | |
9561a7ad JB |
768 | mutex_init(&nsock->tx_lock); |
769 | nsock->sock = sock; | |
9dd5d3ab JB |
770 | nsock->pending = NULL; |
771 | nsock->sent = 0; | |
5ea8d108 | 772 | socks[config->num_connections++] = nsock; |
23272a67 | 773 | |
9561a7ad | 774 | return 0; |
23272a67 MP |
775 | } |
776 | ||
0e4f0f6f MP |
777 | /* Reset all properties of an NBD device */ |
778 | static void nbd_reset(struct nbd_device *nbd) | |
779 | { | |
5ea8d108 | 780 | nbd->config = NULL; |
0eadf37a | 781 | nbd->tag_set.timeout = 0; |
0e4f0f6f | 782 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); |
0e4f0f6f MP |
783 | } |
784 | ||
785 | static void nbd_bdev_reset(struct block_device *bdev) | |
786 | { | |
abbbdf12 RMB |
787 | if (bdev->bd_openers > 1) |
788 | return; | |
29eaadc0 | 789 | bd_set_size(bdev, 0); |
0e4f0f6f MP |
790 | if (max_part > 0) { |
791 | blkdev_reread_part(bdev); | |
792 | bdev->bd_invalidated = 1; | |
793 | } | |
794 | } | |
795 | ||
29eaadc0 | 796 | static void nbd_parse_flags(struct nbd_device *nbd) |
d02cf531 | 797 | { |
5ea8d108 JB |
798 | struct nbd_config *config = nbd->config; |
799 | if (config->flags & NBD_FLAG_READ_ONLY) | |
29eaadc0 JB |
800 | set_disk_ro(nbd->disk, true); |
801 | else | |
802 | set_disk_ro(nbd->disk, false); | |
5ea8d108 | 803 | if (config->flags & NBD_FLAG_SEND_TRIM) |
d02cf531 | 804 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); |
5ea8d108 | 805 | if (config->flags & NBD_FLAG_SEND_FLUSH) |
aafb1eec | 806 | blk_queue_write_cache(nbd->disk->queue, true, false); |
d02cf531 | 807 | else |
aafb1eec | 808 | blk_queue_write_cache(nbd->disk->queue, false, false); |
d02cf531 MP |
809 | } |
810 | ||
9561a7ad JB |
811 | static void send_disconnects(struct nbd_device *nbd) |
812 | { | |
5ea8d108 | 813 | struct nbd_config *config = nbd->config; |
c9f2b6ae AV |
814 | struct nbd_request request = { |
815 | .magic = htonl(NBD_REQUEST_MAGIC), | |
816 | .type = htonl(NBD_CMD_DISC), | |
817 | }; | |
818 | struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; | |
819 | struct iov_iter from; | |
9561a7ad JB |
820 | int i, ret; |
821 | ||
5ea8d108 | 822 | for (i = 0; i < config->num_connections; i++) { |
c9f2b6ae | 823 | iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); |
9dd5d3ab | 824 | ret = sock_xmit(nbd, i, 1, &from, 0, NULL); |
9561a7ad JB |
825 | if (ret <= 0) |
826 | dev_err(disk_to_dev(nbd->disk), | |
827 | "Send disconnect failed %d\n", ret); | |
828 | } | |
829 | } | |
830 | ||
29eaadc0 | 831 | static int nbd_disconnect(struct nbd_device *nbd) |
9442b739 | 832 | { |
5ea8d108 | 833 | struct nbd_config *config = nbd->config; |
30d53d9c | 834 | |
5ea8d108 | 835 | dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); |
9442b739 | 836 | if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, |
5ea8d108 | 837 | &config->runtime_flags)) |
9442b739 JB |
838 | send_disconnects(nbd); |
839 | return 0; | |
840 | } | |
841 | ||
29eaadc0 | 842 | static void nbd_clear_sock(struct nbd_device *nbd) |
1a2ad211 | 843 | { |
9442b739 JB |
844 | sock_shutdown(nbd); |
845 | nbd_clear_que(nbd); | |
5ea8d108 | 846 | nbd->task_setup = NULL; |
5ea8d108 JB |
847 | } |
848 | ||
849 | static void nbd_config_put(struct nbd_device *nbd) | |
850 | { | |
851 | if (refcount_dec_and_mutex_lock(&nbd->config_refs, | |
852 | &nbd->config_lock)) { | |
5ea8d108 | 853 | struct nbd_config *config = nbd->config; |
5ea8d108 | 854 | nbd_dev_dbg_close(nbd); |
29eaadc0 | 855 | nbd_size_clear(nbd); |
5ea8d108 JB |
856 | if (test_and_clear_bit(NBD_HAS_PID_FILE, |
857 | &config->runtime_flags)) | |
858 | device_remove_file(disk_to_dev(nbd->disk), &pid_attr); | |
859 | nbd->task_recv = NULL; | |
29eaadc0 | 860 | nbd_clear_sock(nbd); |
5ea8d108 JB |
861 | if (config->num_connections) { |
862 | int i; | |
863 | for (i = 0; i < config->num_connections; i++) { | |
864 | sockfd_put(config->socks[i]->sock); | |
865 | kfree(config->socks[i]); | |
866 | } | |
867 | kfree(config->socks); | |
868 | } | |
869 | nbd_reset(nbd); | |
870 | mutex_unlock(&nbd->config_lock); | |
5ea8d108 JB |
871 | module_put(THIS_MODULE); |
872 | } | |
9442b739 JB |
873 | } |
874 | ||
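Editor's note: nbd_config_put() is the teardown half of the config_refs scheme; lookups elsewhere in the file only succeed via refcount_inc_not_zero(), and the final put runs the cleanup above under config_lock. A minimal sketch of the same pattern in isolation (editor's illustration, hypothetical demo_* names):

#include <linux/mutex.h>
#include <linux/refcount.h>

struct demo_obj {
	refcount_t refs;
	struct mutex lock;
};

static bool demo_get(struct demo_obj *o)
{
	/* fails once the last reference has been dropped */
	return refcount_inc_not_zero(&o->refs);
}

static void demo_put(struct demo_obj *o)
{
	if (refcount_dec_and_mutex_lock(&o->refs, &o->lock)) {
		/* last reference: release resources while holding the lock */
		mutex_unlock(&o->lock);
	}
}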
875 | static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev) | |
876 | { | |
5ea8d108 JB |
877 | struct nbd_config *config = nbd->config; |
878 | int num_connections = config->num_connections; | |
9442b739 | 879 | int error = 0, i; |
1a2ad211 | 880 | |
9442b739 JB |
881 | if (nbd->task_recv) |
882 | return -EBUSY; | |
5ea8d108 | 883 | if (!config->socks) |
9442b739 JB |
884 | return -EINVAL; |
885 | if (num_connections > 1 && | |
5ea8d108 | 886 | !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) { |
9442b739 | 887 | dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); |
5ea8d108 | 888 | return -EINVAL; |
9442b739 | 889 | } |
23272a67 | 890 | |
29eaadc0 JB |
891 | if (max_part) |
892 | bdev->bd_invalidated = 1; | |
5ea8d108 | 893 | blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); |
9442b739 JB |
894 | nbd->task_recv = current; |
895 | mutex_unlock(&nbd->config_lock); | |
23272a67 | 896 | |
29eaadc0 | 897 | nbd_parse_flags(nbd); |
23272a67 | 898 | |
9442b739 JB |
899 | error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); |
900 | if (error) { | |
901 | dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); | |
5ea8d108 | 902 | return error; |
1a2ad211 PM |
903 | } |
904 | ||
29eaadc0 JB |
905 | set_bit(NBD_HAS_PID_FILE, &config->runtime_flags); |
906 | if (max_part) | |
907 | bdev->bd_invalidated = 1; | |
908 | bd_set_size(bdev, config->bytesize); | |
37091fdd | 909 | |
9442b739 JB |
910 | nbd_dev_dbg_init(nbd); |
911 | for (i = 0; i < num_connections; i++) { | |
5ea8d108 JB |
912 | struct recv_thread_args *args; |
913 | ||
914 | args = kzalloc(sizeof(*args), GFP_KERNEL); | |
915 | if (!args) { | |
916 | sock_shutdown(nbd); | |
917 | return -ENOMEM; | |
918 | } | |
919 | sk_set_memalloc(config->socks[i]->sock->sk); | |
920 | atomic_inc(&config->recv_threads); | |
921 | refcount_inc(&nbd->config_refs); | |
922 | INIT_WORK(&args->work, recv_work); | |
923 | args->nbd = nbd; | |
924 | args->index = i; | |
925 | queue_work(recv_workqueue, &args->work); | |
37091fdd | 926 | } |
5ea8d108 JB |
927 | error = wait_event_interruptible(config->recv_wq, |
928 | atomic_read(&config->recv_threads) == 0); | |
929 | if (error) | |
930 | sock_shutdown(nbd); | |
9442b739 | 931 | mutex_lock(&nbd->config_lock); |
9442b739 JB |
932 | |
933 | /* user requested, ignore socket errors */ | |
5ea8d108 | 934 | if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) |
9442b739 | 935 | error = 0; |
5ea8d108 | 936 | if (test_bit(NBD_TIMEDOUT, &config->runtime_flags)) |
9442b739 | 937 | error = -ETIMEDOUT; |
9442b739 JB |
938 | return error; |
939 | } | |
940 | ||
29eaadc0 JB |
941 | static void nbd_clear_sock_ioctl(struct nbd_device *nbd, |
942 | struct block_device *bdev) | |
943 | { | |
944 | nbd_clear_sock(nbd); | |
945 | kill_bdev(bdev); | |
946 | nbd_bdev_reset(bdev); | |
947 | } | |
948 | ||
9442b739 JB |
949 | /* Must be called with config_lock held */ |
950 | static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |
951 | unsigned int cmd, unsigned long arg) | |
952 | { | |
5ea8d108 JB |
953 | struct nbd_config *config = nbd->config; |
954 | ||
9442b739 JB |
955 | switch (cmd) { |
956 | case NBD_DISCONNECT: | |
29eaadc0 | 957 | return nbd_disconnect(nbd); |
9442b739 | 958 | case NBD_CLEAR_SOCK: |
29eaadc0 JB |
959 | nbd_clear_sock_ioctl(nbd, bdev); |
960 | return 0; | |
9442b739 | 961 | case NBD_SET_SOCK: |
29eaadc0 | 962 | return nbd_add_socket(nbd, arg); |
9442b739 | 963 | case NBD_SET_BLKSIZE: |
29eaadc0 | 964 | nbd_size_set(nbd, arg, |
5ea8d108 | 965 | div_s64(config->bytesize, arg)); |
e544541b | 966 | return 0; |
1da177e4 | 967 | case NBD_SET_SIZE: |
29eaadc0 | 968 | nbd_size_set(nbd, config->blksize, |
5ea8d108 | 969 | div_s64(arg, config->blksize)); |
e544541b | 970 | return 0; |
37091fdd | 971 | case NBD_SET_SIZE_BLOCKS: |
29eaadc0 | 972 | nbd_size_set(nbd, config->blksize, arg); |
e544541b | 973 | return 0; |
7fdfd406 | 974 | case NBD_SET_TIMEOUT: |
f8586855 JB |
975 | if (arg) { |
976 | nbd->tag_set.timeout = arg * HZ; | |
977 | blk_queue_rq_timeout(nbd->disk->queue, arg * HZ); | |
978 | } | |
7fdfd406 | 979 | return 0; |
1a2ad211 | 980 | |
2f012508 | 981 | case NBD_SET_FLAGS: |
5ea8d108 | 982 | config->flags = arg; |
2f012508 | 983 | return 0; |
9442b739 JB |
984 | case NBD_DO_IT: |
985 | return nbd_start_device(nbd, bdev); | |
1da177e4 | 986 | case NBD_CLEAR_QUE: |
4b2f0260 HX |
987 | /* |
988 | * This is for compatibility only. The queue is always cleared | |
989 | * by NBD_DO_IT or NBD_CLEAR_SOCK. | |
990 | */ | |
1da177e4 LT |
991 | return 0; |
992 | case NBD_PRINT_DEBUG: | |
fd8383fd JB |
993 | /* |
994 | * For compatibility only, we no longer keep a list of | |
995 | * outstanding requests. | |
996 | */ | |
1da177e4 LT |
997 | return 0; |
998 | } | |
1a2ad211 PM |
999 | return -ENOTTY; |
1000 | } | |
1001 | ||
1002 | static int nbd_ioctl(struct block_device *bdev, fmode_t mode, | |
1003 | unsigned int cmd, unsigned long arg) | |
1004 | { | |
f4507164 | 1005 | struct nbd_device *nbd = bdev->bd_disk->private_data; |
1a2ad211 PM |
1006 | int error; |
1007 | ||
1008 | if (!capable(CAP_SYS_ADMIN)) | |
1009 | return -EPERM; | |
1010 | ||
9561a7ad | 1011 | mutex_lock(&nbd->config_lock); |
f4507164 | 1012 | error = __nbd_ioctl(bdev, nbd, cmd, arg); |
9561a7ad | 1013 | mutex_unlock(&nbd->config_lock); |
1a2ad211 | 1014 | return error; |
1da177e4 LT |
1015 | } |
1016 | ||
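Editor's note: the commands dispatched above form the legacy nbd configuration interface that user-space tools such as nbd-client drive. A hypothetical minimal sketch (editor's illustration; sock_fd is assumed to be a TCP socket already connected to, and negotiated with, an NBD server):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nbd.h>

static int demo_setup_nbd0(int sock_fd, unsigned long nr_blocks)
{
	int nbd_fd = open("/dev/nbd0", O_RDWR);

	if (nbd_fd < 0)
		return -1;
	if (ioctl(nbd_fd, NBD_SET_SOCK, sock_fd) < 0)
		return -1;
	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096UL);
	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);	/* size = 4096 * nr_blocks */
	return ioctl(nbd_fd, NBD_DO_IT);		/* blocks until disconnect */
}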
5ea8d108 JB |
1017 | static struct nbd_config *nbd_alloc_config(void) |
1018 | { | |
1019 | struct nbd_config *config; | |
1020 | ||
1021 | config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); | |
1022 | if (!config) | |
1023 | return NULL; | |
1024 | atomic_set(&config->recv_threads, 0); | |
1025 | init_waitqueue_head(&config->recv_wq); | |
1026 | config->blksize = 1024; | |
1027 | try_module_get(THIS_MODULE); | |
1028 | return config; | |
1029 | } | |
1030 | ||
1031 | static int nbd_open(struct block_device *bdev, fmode_t mode) | |
1032 | { | |
1033 | struct nbd_device *nbd; | |
1034 | int ret = 0; | |
1035 | ||
1036 | mutex_lock(&nbd_index_mutex); | |
1037 | nbd = bdev->bd_disk->private_data; | |
1038 | if (!nbd) { | |
1039 | ret = -ENXIO; | |
1040 | goto out; | |
1041 | } | |
1042 | if (!refcount_inc_not_zero(&nbd->config_refs)) { | |
1043 | struct nbd_config *config; | |
1044 | ||
1045 | mutex_lock(&nbd->config_lock); | |
1046 | if (refcount_inc_not_zero(&nbd->config_refs)) { | |
1047 | mutex_unlock(&nbd->config_lock); | |
1048 | goto out; | |
1049 | } | |
1050 | config = nbd->config = nbd_alloc_config(); | |
1051 | if (!config) { | |
1052 | ret = -ENOMEM; | |
1053 | mutex_unlock(&nbd->config_lock); | |
1054 | goto out; | |
1055 | } | |
1056 | refcount_set(&nbd->config_refs, 1); | |
1057 | mutex_unlock(&nbd->config_lock); | |
1058 | } | |
1059 | out: | |
1060 | mutex_unlock(&nbd_index_mutex); | |
1061 | return ret; | |
1062 | } | |
1063 | ||
1064 | static void nbd_release(struct gendisk *disk, fmode_t mode) | |
1065 | { | |
1066 | struct nbd_device *nbd = disk->private_data; | |
1067 | nbd_config_put(nbd); | |
1068 | } | |
1069 | ||
83d5cde4 | 1070 | static const struct block_device_operations nbd_fops = |
1da177e4 LT |
1071 | { |
1072 | .owner = THIS_MODULE, | |
5ea8d108 JB |
1073 | .open = nbd_open, |
1074 | .release = nbd_release, | |
8a6cfeb6 | 1075 | .ioctl = nbd_ioctl, |
263a3df1 | 1076 | .compat_ioctl = nbd_ioctl, |
1da177e4 LT |
1077 | }; |
1078 | ||
30d53d9c MP |
1079 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
1080 | ||
1081 | static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) | |
1082 | { | |
1083 | struct nbd_device *nbd = s->private; | |
1084 | ||
1085 | if (nbd->task_recv) | |
1086 | seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); | |
30d53d9c MP |
1087 | |
1088 | return 0; | |
1089 | } | |
1090 | ||
1091 | static int nbd_dbg_tasks_open(struct inode *inode, struct file *file) | |
1092 | { | |
1093 | return single_open(file, nbd_dbg_tasks_show, inode->i_private); | |
1094 | } | |
1095 | ||
1096 | static const struct file_operations nbd_dbg_tasks_ops = { | |
1097 | .open = nbd_dbg_tasks_open, | |
1098 | .read = seq_read, | |
1099 | .llseek = seq_lseek, | |
1100 | .release = single_release, | |
1101 | }; | |
1102 | ||
1103 | static int nbd_dbg_flags_show(struct seq_file *s, void *unused) | |
1104 | { | |
1105 | struct nbd_device *nbd = s->private; | |
5ea8d108 | 1106 | u32 flags = nbd->config->flags; |
30d53d9c MP |
1107 | |
1108 | seq_printf(s, "Hex: 0x%08x\n\n", flags); | |
1109 | ||
1110 | seq_puts(s, "Known flags:\n"); | |
1111 | ||
1112 | if (flags & NBD_FLAG_HAS_FLAGS) | |
1113 | seq_puts(s, "NBD_FLAG_HAS_FLAGS\n"); | |
1114 | if (flags & NBD_FLAG_READ_ONLY) | |
1115 | seq_puts(s, "NBD_FLAG_READ_ONLY\n"); | |
1116 | if (flags & NBD_FLAG_SEND_FLUSH) | |
1117 | seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); | |
1118 | if (flags & NBD_FLAG_SEND_TRIM) | |
1119 | seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); | |
1120 | ||
1121 | return 0; | |
1122 | } | |
1123 | ||
1124 | static int nbd_dbg_flags_open(struct inode *inode, struct file *file) | |
1125 | { | |
1126 | return single_open(file, nbd_dbg_flags_show, inode->i_private); | |
1127 | } | |
1128 | ||
1129 | static const struct file_operations nbd_dbg_flags_ops = { | |
1130 | .open = nbd_dbg_flags_open, | |
1131 | .read = seq_read, | |
1132 | .llseek = seq_lseek, | |
1133 | .release = single_release, | |
1134 | }; | |
1135 | ||
1136 | static int nbd_dev_dbg_init(struct nbd_device *nbd) | |
1137 | { | |
1138 | struct dentry *dir; | |
5ea8d108 | 1139 | struct nbd_config *config = nbd->config; |
27ea43fe MP |
1140 | |
1141 | if (!nbd_dbg_dir) | |
1142 | return -EIO; | |
30d53d9c MP |
1143 | |
1144 | dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); | |
27ea43fe MP |
1145 | if (!dir) { |
1146 | dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", | |
1147 | nbd_name(nbd)); | |
1148 | return -EIO; | |
30d53d9c | 1149 | } |
5ea8d108 | 1150 | config->dbg_dir = dir; |
30d53d9c | 1151 | |
27ea43fe | 1152 | debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); |
5ea8d108 | 1153 | debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); |
0eadf37a | 1154 | debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); |
5ea8d108 | 1155 | debugfs_create_u64("blocksize", 0444, dir, &config->blksize); |
d366a0ff | 1156 | debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); |
30d53d9c MP |
1157 | |
1158 | return 0; | |
1159 | } | |
1160 | ||
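Editor's note: assuming debugfs is mounted at its usual location, the entries created by nbd_dev_dbg_init() land in a per-device directory, e.g. for nbd0:

/*
 *   /sys/kernel/debug/nbd/nbd0/tasks       pid of the receiving task
 *   /sys/kernel/debug/nbd/nbd0/size_bytes  config->bytesize
 *   /sys/kernel/debug/nbd/nbd0/timeout     nbd->tag_set.timeout
 *   /sys/kernel/debug/nbd/nbd0/blocksize   config->blksize
 *   /sys/kernel/debug/nbd/nbd0/flags       decoded NBD_FLAG_* bits
 */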
1161 | static void nbd_dev_dbg_close(struct nbd_device *nbd) | |
1162 | { | |
5ea8d108 | 1163 | debugfs_remove_recursive(nbd->config->dbg_dir); |
30d53d9c MP |
1164 | } |
1165 | ||
1166 | static int nbd_dbg_init(void) | |
1167 | { | |
1168 | struct dentry *dbg_dir; | |
1169 | ||
1170 | dbg_dir = debugfs_create_dir("nbd", NULL); | |
27ea43fe MP |
1171 | if (!dbg_dir) |
1172 | return -EIO; | |
30d53d9c MP |
1173 | |
1174 | nbd_dbg_dir = dbg_dir; | |
1175 | ||
1176 | return 0; | |
1177 | } | |
1178 | ||
1179 | static void nbd_dbg_close(void) | |
1180 | { | |
1181 | debugfs_remove_recursive(nbd_dbg_dir); | |
1182 | } | |
1183 | ||
1184 | #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ | |
1185 | ||
1186 | static int nbd_dev_dbg_init(struct nbd_device *nbd) | |
1187 | { | |
1188 | return 0; | |
1189 | } | |
1190 | ||
1191 | static void nbd_dev_dbg_close(struct nbd_device *nbd) | |
1192 | { | |
1193 | } | |
1194 | ||
1195 | static int nbd_dbg_init(void) | |
1196 | { | |
1197 | return 0; | |
1198 | } | |
1199 | ||
1200 | static void nbd_dbg_close(void) | |
1201 | { | |
1202 | } | |
1203 | ||
1204 | #endif | |
1205 | ||
fd8383fd JB |
1206 | static int nbd_init_request(void *data, struct request *rq, |
1207 | unsigned int hctx_idx, unsigned int request_idx, | |
1208 | unsigned int numa_node) | |
1209 | { | |
1210 | struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); | |
fd8383fd | 1211 | cmd->nbd = data; |
fd8383fd JB |
1212 | return 0; |
1213 | } | |
1214 | ||
f363b089 | 1215 | static const struct blk_mq_ops nbd_mq_ops = { |
fd8383fd | 1216 | .queue_rq = nbd_queue_rq, |
fd8383fd | 1217 | .init_request = nbd_init_request, |
0eadf37a | 1218 | .timeout = nbd_xmit_timeout, |
fd8383fd JB |
1219 | }; |
1220 | ||
b0d9111a JB |
1221 | static void nbd_dev_remove(struct nbd_device *nbd) |
1222 | { | |
1223 | struct gendisk *disk = nbd->disk; | |
b0d9111a JB |
1224 | if (disk) { |
1225 | del_gendisk(disk); | |
1226 | blk_cleanup_queue(disk->queue); | |
1227 | blk_mq_free_tag_set(&nbd->tag_set); | |
1228 | put_disk(disk); | |
1229 | } | |
1230 | kfree(nbd); | |
1231 | } | |
1232 | ||
1233 | static int nbd_dev_add(int index) | |
1234 | { | |
1235 | struct nbd_device *nbd; | |
1236 | struct gendisk *disk; | |
1237 | struct request_queue *q; | |
1238 | int err = -ENOMEM; | |
1239 | ||
1240 | nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); | |
1241 | if (!nbd) | |
1242 | goto out; | |
1243 | ||
1244 | disk = alloc_disk(1 << part_shift); | |
1245 | if (!disk) | |
1246 | goto out_free_nbd; | |
1247 | ||
1248 | if (index >= 0) { | |
1249 | err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, | |
1250 | GFP_KERNEL); | |
1251 | if (err == -ENOSPC) | |
1252 | err = -EEXIST; | |
1253 | } else { | |
1254 | err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); | |
1255 | if (err >= 0) | |
1256 | index = err; | |
1257 | } | |
1258 | if (err < 0) | |
1259 | goto out_free_disk; | |
1260 | ||
1261 | nbd->disk = disk; | |
1262 | nbd->tag_set.ops = &nbd_mq_ops; | |
1263 | nbd->tag_set.nr_hw_queues = 1; | |
1264 | nbd->tag_set.queue_depth = 128; | |
1265 | nbd->tag_set.numa_node = NUMA_NO_NODE; | |
1266 | nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); | |
1267 | nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | | |
1268 | BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; | |
1269 | nbd->tag_set.driver_data = nbd; | |
1270 | ||
1271 | err = blk_mq_alloc_tag_set(&nbd->tag_set); | |
1272 | if (err) | |
1273 | goto out_free_idr; | |
1274 | ||
1275 | q = blk_mq_init_queue(&nbd->tag_set); | |
1276 | if (IS_ERR(q)) { | |
1277 | err = PTR_ERR(q); | |
1278 | goto out_free_tags; | |
1279 | } | |
1280 | disk->queue = q; | |
1281 | ||
1282 | /* | |
1283 | * Tell the block layer that we are not a rotational device | |
1284 | */ | |
1285 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); | |
1286 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); | |
1287 | disk->queue->limits.discard_granularity = 512; | |
1288 | blk_queue_max_discard_sectors(disk->queue, UINT_MAX); | |
b0d9111a JB |
1289 | blk_queue_max_hw_sectors(disk->queue, 65536); |
1290 | disk->queue->limits.max_sectors = 256; | |
1291 | ||
b0d9111a | 1292 | mutex_init(&nbd->config_lock); |
5ea8d108 | 1293 | refcount_set(&nbd->config_refs, 0); |
b0d9111a JB |
1294 | disk->major = NBD_MAJOR; |
1295 | disk->first_minor = index << part_shift; | |
1296 | disk->fops = &nbd_fops; | |
1297 | disk->private_data = nbd; | |
1298 | sprintf(disk->disk_name, "nbd%d", index); | |
b0d9111a JB |
1299 | nbd_reset(nbd); |
1300 | add_disk(disk); | |
1301 | return index; | |
1302 | ||
1303 | out_free_tags: | |
1304 | blk_mq_free_tag_set(&nbd->tag_set); | |
1305 | out_free_idr: | |
1306 | idr_remove(&nbd_index_idr, index); | |
1307 | out_free_disk: | |
1308 | put_disk(disk); | |
1309 | out_free_nbd: | |
1310 | kfree(nbd); | |
1311 | out: | |
1312 | return err; | |
1313 | } | |
1314 | ||
1da177e4 LT |
1315 | /* |
1316 | * And here should be modules and kernel interface | |
1317 | * (Just smiley confuses emacs :-) | |
1318 | */ | |
1319 | ||
1320 | static int __init nbd_init(void) | |
1321 | { | |
1da177e4 LT |
1322 | int i; |
1323 | ||
5b7b18cc | 1324 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); |
1da177e4 | 1325 | |
d71a6d73 | 1326 | if (max_part < 0) { |
7742ce4a | 1327 | printk(KERN_ERR "nbd: max_part must be >= 0\n"); |
d71a6d73 LV |
1328 | return -EINVAL; |
1329 | } | |
1330 | ||
1331 | part_shift = 0; | |
5988ce23 | 1332 | if (max_part > 0) { |
d71a6d73 LV |
1333 | part_shift = fls(max_part); |
1334 | ||
5988ce23 NK |
1335 | /* |
1336 | * Adjust max_part according to part_shift as it is exported | |
1337 | * to user space so that users can know the max number of |
1338 | * partitions the kernel should be able to manage. |
1339 | * | |
1340 | * Note that -1 is required because partition 0 is reserved | |
1341 | * for the whole disk. | |
1342 | */ | |
1343 | max_part = (1UL << part_shift) - 1; | |
1344 | } | |
1345 | ||
3b271082 NK |
1346 | if ((1UL << part_shift) > DISK_MAX_PARTS) |
1347 | return -EINVAL; | |
1348 | ||
1349 | if (nbds_max > 1UL << (MINORBITS - part_shift)) | |
1350 | return -EINVAL; | |
124d6db0 JB |
1351 | recv_workqueue = alloc_workqueue("knbd-recv", |
1352 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | |
1353 | if (!recv_workqueue) | |
1354 | return -ENOMEM; | |
3b271082 | 1355 | |
6330a2d0 JB |
1356 | if (register_blkdev(NBD_MAJOR, "nbd")) { |
1357 | destroy_workqueue(recv_workqueue); | |
b0d9111a | 1358 | return -EIO; |
6330a2d0 | 1359 | } |
1da177e4 | 1360 | |
30d53d9c MP |
1361 | nbd_dbg_init(); |
1362 | ||
b0d9111a JB |
1363 | mutex_lock(&nbd_index_mutex); |
1364 | for (i = 0; i < nbds_max; i++) | |
1365 | nbd_dev_add(i); | |
1366 | mutex_unlock(&nbd_index_mutex); | |
1367 | return 0; | |
1368 | } | |
1da177e4 | 1369 | |
b0d9111a JB |
1370 | static int nbd_exit_cb(int id, void *ptr, void *data) |
1371 | { | |
1372 | struct nbd_device *nbd = ptr; | |
1373 | nbd_dev_remove(nbd); | |
1da177e4 | 1374 | return 0; |
1da177e4 LT |
1375 | } |
1376 | ||
1377 | static void __exit nbd_cleanup(void) | |
1378 | { | |
30d53d9c MP |
1379 | nbd_dbg_close(); |
1380 | ||
b0d9111a JB |
1381 | idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL); |
1382 | idr_destroy(&nbd_index_idr); | |
124d6db0 | 1383 | destroy_workqueue(recv_workqueue); |
1da177e4 | 1384 | unregister_blkdev(NBD_MAJOR, "nbd"); |
1da177e4 LT |
1385 | } |
1386 | ||
1387 | module_init(nbd_init); | |
1388 | module_exit(nbd_cleanup); | |
1389 | ||
1390 | MODULE_DESCRIPTION("Network Block Device"); | |
1391 | MODULE_LICENSE("GPL"); | |
1392 | ||
40be0c28 | 1393 | module_param(nbds_max, int, 0444); |
d71a6d73 LV |
1394 | MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); |
1395 | module_param(max_part, int, 0444); | |
1396 | MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); |