/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

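/* One nbd_sock per server connection; tx_lock serializes senders on it. */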
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

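/* Per-command state, carried in the blk-mq PDU allocated behind each request. */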
struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
static struct workqueue_struct *recv_workqueue;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

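/*
 * Size handling: capacity is pushed both to the gendisk (in 512-byte
 * sectors) and to the bdev inode, and a KOBJ_CHANGE uevent tells
 * userspace about the new size.
 */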
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			loff_t blocksize, loff_t nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down the sockets, causing all pending receivers to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

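/*
 * blk-mq timeout handler: flag the device as timed out, fail the
 * request and shut down all connections so blocked receivers unwind.
 */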
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 * Send or receive a packet over one of the device's sockets.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

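/*
 * On the wire each request starts with a fixed 28-byte control header
 * (magic, type, 8-byte handle, offset, length); for writes it is
 * followed by the payload taken directly from the request's bios.
 */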
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

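/*
 * The reply handle carries back the 32-bit blk-mq unique tag we sent,
 * which encodes both hardware queue and tag; that is how a reply is
 * matched to its outstanding request.
 */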
/* An ERR_PTR return means something went wrong; inform userspace. */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

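/*
 * One receive worker is queued on recv_workqueue per connection; each
 * loops in nbd_read_stat() completing requests until its socket errors
 * out or a disconnect is requested.
 */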
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error; shut everybody down unless it was the result
	 * of a disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

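/*
 * Validate the target connection and send the command under that
 * socket's tx_lock; any failure completes the request with an error.
 */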
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

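/*
 * Attach one more connection. Only the task that added the first
 * socket may add others, so a multi-connection setup is performed by
 * a single process before NBD_DO_IT.
 */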
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	/*
	 * Store the new array right away: krealloc may have freed the
	 * old one, so nbd->socks must not be left dangling if the
	 * allocation below fails.
	 */
	nbd->socks = socks;

	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	int i;

	for (i = 0; i < nbd->num_connections; i++)
		kfree(nbd->socks[i]);
	kfree(nbd->socks);
	nbd->socks = NULL;
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	nbd->num_connections = 0;
	nbd->task_setup = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

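/* Map the server-advertised NBD_FLAG_* bits onto block-layer settings. */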
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

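/* Ask every connected server to close its end of the connection. */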
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

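/*
 * Userspace drives a device with a fixed ioctl sequence: one
 * NBD_SET_SOCK per connection, optionally NBD_SET_BLKSIZE /
 * NBD_SET_SIZE / NBD_SET_FLAGS / NBD_SET_TIMEOUT, then NBD_DO_IT,
 * which blocks until all receive workers have exited.
 */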
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->socks)
			return -EINVAL;

		mutex_unlock(&nbd->config_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->config_lock);

		/* Check again after getting mutex back. */
		if (!nbd->socks)
			return -EINVAL;

		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
				      &nbd->runtime_flags))
			send_disconnects(nbd);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);
		/*
		 * We want to give the run thread a chance to wait for everybody
		 * to clean up and then do its own cleanup.
		 */
		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
			int i;

			for (i = 0; i < nbd->num_connections; i++)
				kfree(nbd->socks[i]);
			kfree(nbd->socks);
			nbd->socks = NULL;
			nbd->num_connections = 0;
			nbd->task_setup = NULL;
		}
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_add_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    div_s64(arg, nbd->blksize));

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct recv_thread_args *args;
		int num_connections = nbd->num_connections;
		int error = 0, i;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->socks)
			return -EINVAL;
		if (num_connections > 1 &&
		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
			error = -EINVAL;
			goto out_err;
		}

		set_bit(NBD_RUNNING, &nbd->runtime_flags);
		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
		if (!args) {
			error = -ENOMEM;
			goto out_err;
		}
		nbd->task_recv = current;
		mutex_unlock(&nbd->config_lock);

		nbd_parse_flags(nbd, bdev);

		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		if (error) {
			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
			goto out_recv;
		}

		nbd_size_update(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		for (i = 0; i < num_connections; i++) {
			sk_set_memalloc(nbd->socks[i]->sock->sk);
			atomic_inc(&nbd->recv_threads);
			INIT_WORK(&args[i].work, recv_work);
			args[i].nbd = nbd;
			args[i].index = i;
			queue_work(recv_workqueue, &args[i].work);
		}
		wait_event_interruptible(nbd->recv_wq,
					 atomic_read(&nbd->recv_threads) == 0);
		for (i = 0; i < num_connections; i++)
			flush_work(&args[i].work);
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd, bdev);
		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
		mutex_lock(&nbd->config_lock);
		nbd->task_recv = NULL;
out_err:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

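/*
 * Debugfs support: each device gets a directory under
 * /sys/kernel/debug/nbd/<name> exposing the receiver task, size,
 * timeout, blocksize and flags.
 */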
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

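/*
 * The receive workers run on a dedicated workqueue created with
 * WQ_MEM_RECLAIM, so they can make forward progress under memory
 * pressure: completing block I/O may itself be needed to reclaim
 * memory, and the shared system workqueues give no such guarantee.
 */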
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev) {
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	for (i = 0; i < nbds_max; i++) {
		struct request_queue *q;
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (IS_ERR(q)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}
		disk->queue = q;

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		mutex_init(&nbd_dev[i].config_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		init_waitqueue_head(&nbd_dev[i].recv_wq);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	destroy_workqueue(recv_workqueue);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");