/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
        struct request *pending;
        int sent;
};

#define NBD_TIMEDOUT 0
#define NBD_DISCONNECT_REQUESTED 1
#define NBD_DISCONNECTED 2
#define NBD_RUNNING 3

struct nbd_device {
        u32 flags;
        unsigned long runtime_flags;
        struct nbd_sock **socks;
        int magic;

        struct blk_mq_tag_set tag_set;

        struct mutex config_lock;
        struct gendisk *disk;
        int num_connections;
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
        loff_t blksize;
        loff_t bytesize;

        struct task_struct *task_recv;
        struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
        struct nbd_device *nbd;
        struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);


static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
        return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}

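/*
 * Helpers that keep the gendisk and backing block_device size in sync
 * with nbd->blksize and nbd->bytesize.
 */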
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
        bd_set_size(bdev, 0);
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

        return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
        blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
        blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
        bd_set_size(bdev, nbd->bytesize);
        set_capacity(nbd->disk, nbd->bytesize >> 9);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
                         loff_t blocksize, loff_t nr_blocks)
{
        nbd->blksize = blocksize;
        nbd->bytesize = blocksize * nr_blocks;
        if (nbd_is_connected(nbd))
                nbd_size_update(nbd, bdev);
}

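/*
 * Complete a request back to the block layer, turning any stored
 * req->errors into -EIO.
 */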
static void nbd_end_request(struct nbd_cmd *cmd)
{
        struct nbd_device *nbd = cmd->nbd;
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int error = req->errors ? -EIO : 0;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
                error ? "failed" : "done");

        blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        int i;

        if (nbd->num_connections == 0)
                return;
        if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
                return;

        for (i = 0; i < nbd->num_connections; i++) {
                struct nbd_sock *nsock = nbd->socks[i];
                mutex_lock(&nsock->tx_lock);
                kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
                mutex_unlock(&nsock->tx_lock);
        }
        dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

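/*
 * blk-mq timeout handler: mark the device as timed out, fail the request
 * and shut down every socket so the receive workers exit.
 */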
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                                 bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct nbd_device *nbd = cmd->nbd;

        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
        set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
        req->errors = -EIO;

        mutex_lock(&nbd->config_lock);
        sock_shutdown(nbd);
        mutex_unlock(&nbd->config_lock);
        return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
                     struct iov_iter *iter, int msg_flags, int *sent)
{
        struct socket *sock = nbd->socks[index]->sock;
        int result;
        struct msghdr msg;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        msg.msg_iter = *iter;

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = sock_sendmsg(sock, &msg);
                else
                        result = sock_recvmsg(sock, &msg, msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                if (sent)
                        *sent += result;
        } while (msg_data_left(&msg));

        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_sock *nsock = nbd->socks[index];
        int result;
        struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
        struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
        struct iov_iter from;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
        u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;

        iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                type = NBD_CMD_TRIM;
                break;
        case REQ_OP_FLUSH:
                type = NBD_CMD_FLUSH;
                break;
        case REQ_OP_WRITE:
                type = NBD_CMD_WRITE;
                break;
        case REQ_OP_READ:
                type = NBD_CMD_READ;
                break;
        default:
                return -EIO;
        }

        if (rq_data_dir(req) == WRITE &&
            (nbd->flags & NBD_FLAG_READ_ONLY)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Write on read-only\n");
                return -EIO;
        }

        /* We did a partial send previously, and we at least sent the whole
         * request struct, so just go and send the rest of the pages in the
         * request.
         */
        if (sent) {
                if (sent >= sizeof(request)) {
                        skip = sent - sizeof(request);
                        goto send_pages;
                }
                iov_iter_advance(&from, sent);
        }
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &tag, sizeof(tag));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                cmd, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &from,
                        (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
        if (result <= 0) {
                if (result == -ERESTARTSYS) {
                        /* If we haven't sent anything we can just return BUSY,
                         * however if we have sent something we need to make
                         * sure we only allow this req to be sent until we are
                         * completely done.
                         */
                        if (sent) {
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
                        return BLK_MQ_RQ_QUEUE_BUSY;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                return -EIO;
        }
send_pages:
        if (type != NBD_CMD_WRITE)
                goto out;

        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
                struct bvec_iter iter;
                struct bio_vec bvec;

                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
                        int flags = is_last ? 0 : MSG_MORE;

                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        iov_iter_bvec(&from, ITER_BVEC | WRITE,
                                      &bvec, 1, bvec.bv_len);
                        if (skip) {
                                if (skip >= iov_iter_count(&from)) {
                                        skip -= iov_iter_count(&from);
                                        continue;
                                }
                                iov_iter_advance(&from, skip);
                                skip = 0;
                        }
                        result = sock_xmit(nbd, index, 1, &from, flags, &sent);
                        if (result <= 0) {
                                if (result == -ERESTARTSYS) {
                                        /* We've already sent the header, we
                                         * have no choice but to set pending and
                                         * return BUSY.
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
                                        return BLK_MQ_RQ_QUEUE_BUSY;
                                }
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                        /*
                         * The completion might already have come in,
                         * so break for the last one instead of letting
                         * the iterator do it. This prevents use-after-free
                         * of the bio.
                         */
                        if (is_last)
                                break;
                }
                bio = next;
        }
out:
        nsock->pending = NULL;
        nsock->sent = 0;
        return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
        int result;
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
        u16 hwq;
        u32 tag;
        struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
        struct iov_iter to;

        reply.magic = 0;
        iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
        if (result <= 0) {
                if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
                    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        dev_err(disk_to_dev(nbd->disk),
                                "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        memcpy(&tag, reply.handle, sizeof(u32));

        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                                       blk_mq_unique_tag_to_tag(tag));
        if (!req || !blk_mq_request_started(req)) {
                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
                        tag, req);
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors = -EIO;
                return cmd;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        iov_iter_bvec(&to, ITER_BVEC | READ,
                                      &bvec, 1, bvec.bv_len);
                        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors = -EIO;
                                return cmd;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                cmd, bvec.bv_len);
                }
        } else {
                /* See the comment in nbd_queue_rq. */
                wait_for_completion(&cmd->send_complete);
        }
        return cmd;
}

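/* sysfs "pid" attribute: pid of the receiver task, set while NBD_DO_IT runs. */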
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

struct recv_thread_args {
        struct work_struct work;
        struct nbd_device *nbd;
        int index;
};

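/*
 * Per-connection receive worker: keep pulling replies off the socket and
 * completing requests until nbd_read_stat() fails, then shut everything
 * down unless the failure came from a requested disconnect.
 */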
static void recv_work(struct work_struct *work)
{
        struct recv_thread_args *args = container_of(work,
                                                     struct recv_thread_args,
                                                     work);
        struct nbd_device *nbd = args->nbd;
        struct nbd_cmd *cmd;
        int ret = 0;

        BUG_ON(nbd->magic != NBD_MAGIC);
        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
                if (IS_ERR(cmd)) {
                        ret = PTR_ERR(cmd);
                        break;
                }

                nbd_end_request(cmd);
        }

        /*
         * We got an error, shut everybody down if this wasn't the result of a
         * disconnect request.
         */
        if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                sock_shutdown(nbd);
        atomic_dec(&nbd->recv_threads);
        wake_up(&nbd->recv_wq);
}

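/* Fail every in-flight request; used when the device is torn down. */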
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
        struct nbd_cmd *cmd;

        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
        req->errors = -EIO;
        nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        BUG_ON(nbd->magic != NBD_MAGIC);

        blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}


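/*
 * Validate the target socket and send the request, holding the per-socket
 * tx_lock so only one request is on the wire at a time.
 */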
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_sock *nsock;
        int ret;

        if (index >= nbd->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
                return -EINVAL;
        }

        if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                return -EINVAL;
        }

        req->errors = 0;

        nsock = nbd->socks[index];
        mutex_lock(&nsock->tx_lock);
        if (unlikely(!nsock->sock)) {
                mutex_unlock(&nsock->tx_lock);
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                return -EINVAL;
        }

        /* Handle the case that we have a pending request that was partially
         * transmitted that _has_ to be serviced first. We need to call requeue
         * here so that it gets put _after_ the request that is already on the
         * dispatch list.
         */
        if (unlikely(nsock->pending && nsock->pending != req)) {
                blk_mq_requeue_request(req, true);
                ret = 0;
                goto out;
        }
        ret = nbd_send_cmd(nbd, cmd, index);
out:
        mutex_unlock(&nsock->tx_lock);
        return ret;
}

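/* blk-mq ->queue_rq hook: each hardware queue maps to one NBD connection. */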
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
        int ret;

        /*
         * Since we look at the bio's to send the request over the network we
         * need to make sure the completion work doesn't mark this request done
         * before we are done doing our send. This keeps us from dereferencing
         * freed data if we have particularly fast completions (ie we get the
         * completion before we exit sock_xmit on the last bvec) or in the case
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
        init_completion(&cmd->send_complete);
        blk_mq_start_request(bd->rq);

        /* We can be called directly from the user space process, which means we
         * could possibly have signals pending so our sendmsg will fail. In
         * this case we need to return that we are busy, otherwise error out as
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
        if (ret < 0)
                ret = BLK_MQ_RQ_QUEUE_ERROR;
        if (!ret)
                ret = BLK_MQ_RQ_QUEUE_OK;
        complete(&cmd->send_complete);

        return ret;
}

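/*
 * NBD_SET_SOCK: take a socket fd from userspace and append it to the
 * device's connection array. Only the task that started the setup may
 * add further sockets.
 */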
static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
                          unsigned long arg)
{
        struct socket *sock;
        struct nbd_sock **socks;
        struct nbd_sock *nsock;
        int err;

        sock = sockfd_lookup(arg, &err);
        if (!sock)
                return err;

        if (!nbd->task_setup)
                nbd->task_setup = current;
        if (nbd->task_setup != current) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
                return -EINVAL;
        }

        socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks)
                return -ENOMEM;
        nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
        if (!nsock)
                return -ENOMEM;

        nbd->socks = socks;

        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
        nsock->pending = NULL;
        nsock->sent = 0;
        socks[nbd->num_connections++] = nsock;

        if (max_part)
                bdev->bd_invalidated = 1;
        return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
        nbd->runtime_flags = 0;
        nbd->blksize = 1024;
        nbd->bytesize = 0;
        set_capacity(nbd->disk, 0);
        nbd->flags = 0;
        nbd->tag_set.timeout = 0;
        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
                blkdev_reread_part(bdev);
                bdev->bd_invalidated = 1;
        }
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
        if (nbd->flags & NBD_FLAG_READ_ONLY)
                set_device_ro(bdev, true);
        if (nbd->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                blk_queue_write_cache(nbd->disk->queue, true, false);
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
}

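/* Send an NBD_CMD_DISC request down every connection so the server drops them. */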
static void send_disconnects(struct nbd_device *nbd)
{
        struct nbd_request request = {
                .magic = htonl(NBD_REQUEST_MAGIC),
                .type = htonl(NBD_CMD_DISC),
        };
        struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
        struct iov_iter from;
        int i, ret;

        for (i = 0; i < nbd->num_connections; i++) {
                iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
                ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
        }
}

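/* NBD_DISCONNECT: flush the block device and request a disconnect on all sockets. */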
static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
        dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
        if (!nbd->socks)
                return -EINVAL;

        mutex_unlock(&nbd->config_lock);
        fsync_bdev(bdev);
        mutex_lock(&nbd->config_lock);

        /* Check again after getting mutex back. */
        if (!nbd->socks)
                return -EINVAL;

        if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
                              &nbd->runtime_flags))
                send_disconnects(nbd);
        return 0;
}

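/*
 * NBD_CLEAR_SOCK: shut down and free all sockets, fail outstanding requests
 * and reset the backing block device.
 */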
static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
        sock_shutdown(nbd);
        nbd_clear_que(nbd);
        kill_bdev(bdev);
        nbd_bdev_reset(bdev);
        /*
         * We want to give the run thread a chance to wait for everybody
         * to clean up and then do its own cleanup.
         */
        if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
            nbd->num_connections) {
                int i;

                for (i = 0; i < nbd->num_connections; i++) {
                        sockfd_put(nbd->socks[i]->sock);
                        kfree(nbd->socks[i]);
                }
                kfree(nbd->socks);
                nbd->socks = NULL;
                nbd->num_connections = 0;
        }
        nbd->task_setup = NULL;

        return 0;
}

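/*
 * NBD_DO_IT: spawn one receive worker per connection and block the calling
 * process until they all exit, then tear the device back down.
 */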
static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
        struct recv_thread_args *args;
        int num_connections = nbd->num_connections;
        int error = 0, i;

        if (nbd->task_recv)
                return -EBUSY;
        if (!nbd->socks)
                return -EINVAL;
        if (num_connections > 1 &&
            !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
                dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
                error = -EINVAL;
                goto out_err;
        }

        set_bit(NBD_RUNNING, &nbd->runtime_flags);
        blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
        args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
        if (!args) {
                error = -ENOMEM;
                goto out_err;
        }
        nbd->task_recv = current;
        mutex_unlock(&nbd->config_lock);

        nbd_parse_flags(nbd, bdev);

        error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (error) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                goto out_recv;
        }

        nbd_size_update(nbd, bdev);

        nbd_dev_dbg_init(nbd);
        for (i = 0; i < num_connections; i++) {
                sk_set_memalloc(nbd->socks[i]->sock->sk);
                atomic_inc(&nbd->recv_threads);
                INIT_WORK(&args[i].work, recv_work);
                args[i].nbd = nbd;
                args[i].index = i;
                queue_work(recv_workqueue, &args[i].work);
        }
        wait_event_interruptible(nbd->recv_wq,
                                 atomic_read(&nbd->recv_threads) == 0);
        for (i = 0; i < num_connections; i++)
                flush_work(&args[i].work);
        nbd_dev_dbg_close(nbd);
        nbd_size_clear(nbd, bdev);
        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
        mutex_lock(&nbd->config_lock);
        nbd->task_recv = NULL;
out_err:
        clear_bit(NBD_RUNNING, &nbd->runtime_flags);
        nbd_clear_sock(nbd, bdev);

        /* user requested, ignore socket errors */
        if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                error = 0;
        if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
                error = -ETIMEDOUT;

        nbd_reset(nbd);
        return error;
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT:
                return nbd_disconnect(nbd, bdev);
        case NBD_CLEAR_SOCK:
                return nbd_clear_sock(nbd, bdev);
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, bdev, arg);
        case NBD_SET_BLKSIZE:
                nbd_size_set(nbd, bdev, arg,
                             div_s64(nbd->bytesize, arg));
                return 0;
        case NBD_SET_SIZE:
                nbd_size_set(nbd, bdev, nbd->blksize,
                             div_s64(arg, nbd->blksize));
                return 0;
        case NBD_SET_SIZE_BLOCKS:
                nbd_size_set(nbd, bdev, nbd->blksize, arg);
                return 0;
        case NBD_SET_TIMEOUT:
                nbd->tag_set.timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;
        case NBD_DO_IT:
                return nbd_start_device(nbd, bdev);
        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;
        case NBD_PRINT_DEBUG:
                /*
                 * For compatibility only, we no longer keep a list of
                 * outstanding requests.
                 */
                return 0;
        }
        return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->config_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->config_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner = THIS_MODULE,
        .ioctl = nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;

        if (nbd->task_recv)
                seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

        return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
        .open = nbd_dbg_tasks_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;
        u32 flags = nbd->flags;

        seq_printf(s, "Hex: 0x%08x\n\n", flags);

        seq_puts(s, "Known flags:\n");

        if (flags & NBD_FLAG_HAS_FLAGS)
                seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
        if (flags & NBD_FLAG_READ_ONLY)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

        return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
        .open = nbd_dbg_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        struct dentry *dir;

        if (!nbd_dbg_dir)
                return -EIO;

        dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
        if (!dir) {
                dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
                        nbd_name(nbd));
                return -EIO;
        }
        nbd->dbg_dir = dir;

        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
        debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
        debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
        struct dentry *dbg_dir;

        dbg_dir = debugfs_create_dir("nbd", NULL);
        if (!dbg_dir)
                return -EIO;

        nbd_dbg_dir = dbg_dir;

        return 0;
}

static void nbd_dbg_close(void)
{
        debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
        return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
                            unsigned int hctx_idx, unsigned int request_idx,
                            unsigned int numa_node)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
        cmd->nbd = data;
        return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
        .queue_rq = nbd_queue_rq,
        .init_request = nbd_init_request,
        .timeout = nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
        struct gendisk *disk = nbd->disk;
        nbd->magic = 0;
        if (disk) {
                del_gendisk(disk);
                blk_cleanup_queue(disk->queue);
                blk_mq_free_tag_set(&nbd->tag_set);
                put_disk(disk);
        }
        kfree(nbd);
}

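/*
 * Allocate a gendisk, a blk-mq tag set and a request queue for one nbd
 * device and register it under the requested (or next free) index.
 */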
static int nbd_dev_add(int index)
{
        struct nbd_device *nbd;
        struct gendisk *disk;
        struct request_queue *q;
        int err = -ENOMEM;

        nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
        if (!nbd)
                goto out;

        disk = alloc_disk(1 << part_shift);
        if (!disk)
                goto out_free_nbd;

        if (index >= 0) {
                err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
                                GFP_KERNEL);
                if (err == -ENOSPC)
                        err = -EEXIST;
        } else {
                err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
                if (err >= 0)
                        index = err;
        }
        if (err < 0)
                goto out_free_disk;

        nbd->disk = disk;
        nbd->tag_set.ops = &nbd_mq_ops;
        nbd->tag_set.nr_hw_queues = 1;
        nbd->tag_set.queue_depth = 128;
        nbd->tag_set.numa_node = NUMA_NO_NODE;
        nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
        nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
        nbd->tag_set.driver_data = nbd;

        err = blk_mq_alloc_tag_set(&nbd->tag_set);
        if (err)
                goto out_free_idr;

        q = blk_mq_init_queue(&nbd->tag_set);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_free_tags;
        }
        disk->queue = q;

        /*
         * Tell the block layer that we are not a rotational device
         */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
        disk->queue->limits.discard_granularity = 512;
        blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
        disk->queue->limits.discard_zeroes_data = 0;
        blk_queue_max_hw_sectors(disk->queue, 65536);
        disk->queue->limits.max_sectors = 256;

        nbd->magic = NBD_MAGIC;
        mutex_init(&nbd->config_lock);
        disk->major = NBD_MAJOR;
        disk->first_minor = index << part_shift;
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
        init_waitqueue_head(&nbd->recv_wq);
        nbd_reset(nbd);
        add_disk(disk);
        return index;

out_free_tags:
        blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
        idr_remove(&nbd_index_idr, index);
out_free_disk:
        put_disk(disk);
out_free_nbd:
        kfree(nbd);
out:
        return err;
}

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int i;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partition kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
        recv_workqueue = alloc_workqueue("knbd-recv",
                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!recv_workqueue)
                return -ENOMEM;

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                destroy_workqueue(recv_workqueue);
                return -EIO;
        }

        nbd_dbg_init();

        mutex_lock(&nbd_index_mutex);
        for (i = 0; i < nbds_max; i++)
                nbd_dev_add(i);
        mutex_unlock(&nbd_index_mutex);
        return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
        struct nbd_device *nbd = ptr;
        nbd_dev_remove(nbd);
        return 0;
}

static void __exit nbd_cleanup(void)
{
        nbd_dbg_close();

        idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
        idr_destroy(&nbd_index_idr);
        destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");