/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);


static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	if (bdev->bd_openers <= 1)
		bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors = -EIO;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_sock *nsock = nbd->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* ERR_PTR returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors = -EIO;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors = -EIO;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

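/*
 * Per-connection work item: drain replies from one socket via nbd_read_stat()
 * and complete the matching requests until the socket errors or is shut down.
 */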
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors = -EIO;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}


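/*
 * Validate the target connection and hand the request to nbd_send_cmd(),
 * requeueing it if a previous request on this socket is still partially sent.
 */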
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;
	int ret;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return -EINVAL;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		return -EINVAL;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		return -EINVAL;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	ret = nbd_send_cmd(nbd, cmd, index);
out:
	mutex_unlock(&nsock->tx_lock);
	return ret;
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);

	return ret;
}

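/*
 * NBD_SET_SOCK: look up the socket fd passed from userspace and append it to
 * the device's connection array. Only one task may configure the device.
 */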
static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
			  unsigned long arg)
{
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	socks[nbd->num_connections++] = nsock;

	if (max_part)
		bdev->bd_invalidated = 1;
	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < nbd->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!nbd->socks)
		return -EINVAL;

	mutex_unlock(&nbd->config_lock);
	fsync_bdev(bdev);
	mutex_lock(&nbd->config_lock);

	/* Check again after getting mutex back. */
	if (!nbd->socks)
		return -EINVAL;

	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &nbd->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);

	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	/*
	 * We want to give the run thread a chance to wait for everybody
	 * to clean up and then do its own cleanup.
	 */
	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
	    nbd->num_connections) {
		int i;

		for (i = 0; i < nbd->num_connections; i++) {
			sockfd_put(nbd->socks[i]->sock);
			kfree(nbd->socks[i]);
		}
		kfree(nbd->socks);
		nbd->socks = NULL;
		nbd->num_connections = 0;
	}
	nbd->task_setup = NULL;

	return 0;
}

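/*
 * NBD_DO_IT: queue one receive work item per connection and block until they
 * all exit, then tear the device state back down.
 */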
static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
	struct recv_thread_args *args;
	int num_connections = nbd->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!nbd->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		error = -EINVAL;
		goto out_err;
	}

	set_bit(NBD_RUNNING, &nbd->runtime_flags);
	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
	if (!args) {
		error = -ENOMEM;
		goto out_err;
	}
	nbd->task_recv = current;
	mutex_unlock(&nbd->config_lock);

	nbd_parse_flags(nbd, bdev);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		goto out_recv;
	}

	nbd_size_update(nbd, bdev);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		sk_set_memalloc(nbd->socks[i]->sock->sk);
		atomic_inc(&nbd->recv_threads);
		INIT_WORK(&args[i].work, recv_work);
		args[i].nbd = nbd;
		args[i].index = i;
		queue_work(recv_workqueue, &args[i].work);
	}
	wait_event_interruptible(nbd->recv_wq,
				 atomic_read(&nbd->recv_threads) == 0);
	for (i = 0; i < num_connections; i++)
		flush_work(&args[i].work);
	nbd_dev_dbg_close(nbd);
	nbd_size_clear(nbd, bdev);
	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
	mutex_lock(&nbd->config_lock);
	nbd->task_recv = NULL;
out_err:
	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
	nbd_clear_sock(nbd, bdev);

	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		error = 0;
	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
		error = -ETIMEDOUT;

	nbd_reset(nbd);
	return error;
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd, bdev);
	case NBD_CLEAR_SOCK:
		return nbd_clear_sock(nbd, bdev);
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, bdev, arg);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, bdev, arg,
			     div_s64(nbd->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	nbd->magic = 0;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

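/*
 * Allocate one nbd device: reserve an index in the IDR, set up the blk-mq
 * tag set and request queue, and register the gendisk.
 */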
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	disk->queue->limits.discard_zeroes_data = 0;
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	nbd->magic = NBD_MAGIC;
	mutex_init(&nbd->config_lock);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	init_waitqueue_head(&nbd->recv_wq);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");