/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)

	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
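/*
 * Added note, illustrative only: the legacy XOR transfer above applies the
 * key as a repeating pad within each 512-byte sector, i.e. byte i of a
 * sector is XORed with key[(i & 511) % keysize], so the same key bytes line
 * up at the same sector offsets every time. A minimal sketch of the same
 * per-sector transform, assuming keysize <= 512, would be:
 *
 *	for (i = 0; i < 512; i++)
 *		out[i] = in[i] ^ key[i % keysize];
 *
 * This is obfuscation rather than real encryption; dm-crypt is the
 * maintained alternative for encrypted block devices.
 */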
static int xor_init(struct loop_device *lo, const struct loop_info64 *info)

	if (unlikely(info->lo_encrypt_key_size <= 0))

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);

	/* offset is beyond i_size, weird but possible */

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;

	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;

static loff_t get_loop_size(struct loop_device *lo, struct file *file)

	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
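/*
 * Added note, illustrative only: get_size() reports whole 512-byte sectors
 * remaining after skipping 'offset' bytes of the backing file, optionally
 * clamped by 'sizelimit'. For example, a 1 GiB (1073741824-byte) backing
 * file used with offset = 1 MiB and sizelimit = 0 yields
 * (1073741824 - 1048576) >> 9 = 2095104 sectors, i.e. 1023 MiB of loop
 * device capacity.
 */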
static void __loop_update_dio(struct loop_device *lo, bool dio)

	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of the backing device, the logical block
	 * size of the loop device is at least as big as the backing
	 * device's, and the loop device needn't transform the transfer.
	 *
	 * TODO: the above condition may be relaxed in the future, and
	 * direct I/O may then be switched at runtime, because most
	 * requests in sane applications should be PAGE_SIZE aligned
	 */
	if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
	    !(lo->lo_offset & dio_align) &&
	    mapping->a_ops->direct_IO &&

	if (lo->use_dio == use_dio)

	/* flush dirty pages before changing direct IO */

	/*
	 * The LO_FLAGS_DIRECT_IO flag is handled similarly to
	 * LO_FLAGS_READ_ONLY: both are set by the kernel, and losetup
	 * will see the updated value via ioctl(LOOP_GET_STATUS)
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
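/*
 * Added note, illustrative only: with a backing file on a filesystem whose
 * block device reports 4096-byte logical sectors, the checks above enable
 * direct I/O only when the loop queue's logical block size is at least 4096
 * and lo_offset is a multiple of 4096 (lo_offset & 4095 == 0); an offset of,
 * say, 512 keeps the device on buffered I/O.
 */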
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)

	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	if (unlikely((loff_t)x != size))

	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);

static int lo_write_simple(struct loop_device *lo, struct request *rq,

	struct req_iterator iter;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,

	struct bio_vec bvec, b;
	struct req_iterator iter;

	page = alloc_page(GFP_NOIO);

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);

		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);

static int lo_read_simple(struct loop_device *lo, struct request *rq,

	struct req_iterator iter;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos);

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {

			__rq_for_each_bio(bio, rq)
static int lo_read_transfer(struct loop_device *lo, struct request *rq,

	struct bio_vec bvec, b;
	struct req_iterator iter;

	page = alloc_page(GFP_NOIO);

	rq_for_each_segment(bvec, rq, iter) {

		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos);

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {

			__rq_for_each_bio(bio, rq)
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))

static int lo_req_flush(struct loop_device *lo, struct request *rq)

	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)

	if (bytes < 0 || op_is_write(req_op(cmd->rq)))

	if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
		struct bio *bio = cmd->rq->bio;

		bio_advance(bio, bytes);

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)

	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
	struct request *rq = cmd->rq;

	handle_partial_read(cmd, ret);

	blk_mq_complete_request(rq, ret);
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,

	struct iov_iter iter;
	struct bio_vec *bvec;
	struct bio *bio = cmd->rq->bio;
	struct file *file = lo->lo_backing_file;

	/* nomerge for loop request queue */
	WARN_ON(cmd->rq->bio != cmd->rq->biotail);

	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
		      bio_segments(bio), blk_rq_bytes(cmd->rq));
	/*
	 * This bio may be started from the middle of the 'bvec'
	 * because of bio splitting, so offset from the bvec must
	 * be passed to iov iterator
	 */
	iter.iov_offset = bio->bi_iter.bi_bvec_done;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;

		ret = file->f_op->write_iter(&cmd->iocb, &iter);

		ret = file->f_op->read_iter(&cmd->iocb, &iter);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
static int do_req_filebacked(struct loop_device *lo, struct request *rq)

	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by an io submit style function like lo_rw_aio(); one blocker
	 * is that lo_read_simple() needs to call flush_dcache_page() after
	 * the page is written from the kernel, and that isn't easy to handle
	 * in an io submit style function which submits all segments
	 * of the request at one time. Direct read IO doesn't need to
	 * run flush_dcache_page().
	 */
	switch (req_op(rq)) {
		return lo_req_flush(lo, rq);
		return lo_discard(lo, rq, pos);
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
			return lo_write_simple(lo, rq, pos);
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
			return lo_read_simple(lo, rq, pos);
struct switch_request {
	struct completion wait;

static inline void loop_update_dio(struct loop_device *lo)

	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |

/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)

	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO, it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)

	struct switch_request w;

	/* freeze queue and wait for completion of scheduled requests */
	blk_mq_freeze_queue(lo->lo_queue);

	/* do the switch action */
	do_loop_switch(lo, &w);

	blk_mq_unfreeze_queue(lo->lo_queue);

/*
 * Helper to flush the IOs in loop, but keeping loop thread running
 */
static int loop_flush(struct loop_device *lo)

	return loop_switch(lo, NULL);

static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)

	/*
	 * bd_mutex has been held already in release path, so don't
	 * acquire it if this function is called in such case.
	 *
	 * If the reread partition isn't from release path, lo_refcnt
	 * must be at least one and it can only become zero when the
	 * current holder is released.
	 */
	if (!atomic_read(&lo->lo_refcnt))
		rc = __blkdev_reread_part(bdev);
		rc = blkdev_reread_part(bdev);

		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,

	struct file *file, *old_file;

	if (lo->lo_state != Lo_bound)

	/* the loop device has to be read-only */
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))

	error = loop_switch(lo, file);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);
static inline int is_loop_device(struct file *file)

	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;

/*
 * no get/put for file.
 */
struct file *loop_backing_file(struct super_block *sb)

	struct loop_device *l;

	if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
		l = sb->s_bdev->bd_disk->private_data;
		ret = l->lo_backing_file;

EXPORT_SYMBOL_GPL(loop_backing_file);
/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))

	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
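/*
 * Added note, illustrative only: expanding the macro by hand for one
 * attribute makes the generated names easier to grep for.
 * LOOP_ATTR_RO(offset) declares loop_attr_offset_show(), wraps it in
 * loop_attr_do_show_offset() (the sysfs ->show callback), and defines a
 * read-only device attribute:
 *
 *	static struct device_attribute loop_attr_offset =
 *		__ATTR(offset, S_IRUGO, loop_attr_do_show_offset, NULL);
 *
 * which is what ends up listed in loop_attrs[] below.
 */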
static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))

	memmove(buf, p, ret);

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)

	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)

	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)

	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sprintf(buf, "%s\n", autoclear ? "1" : "0");

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)

	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sprintf(buf, "%s\n", partscan ? "1" : "0");

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)

	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sprintf(buf, "%s\n", dio ? "1" : "0");

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,

static struct attribute_group loop_attribute_group = {

static int loop_sysfs_init(struct loop_device *lo)

	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
				  &loop_attribute_group);

static void loop_sysfs_exit(struct loop_device *lo)

	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
			   &loop_attribute_group);
static void loop_config_discard(struct loop_device *lo)

	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	if ((!file->f_op->fallocate) ||
	    lo->lo_encrypt_key_size) {
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(q, 0);
		q->limits.discard_zeroes_data = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);

	q->limits.discard_granularity = inode->i_sb->s_blocksize;
	q->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
	q->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

static void loop_unprepare_queue(struct loop_device *lo)

	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);

static int loop_prepare_queue(struct loop_device *lo)

	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
	set_user_nice(lo->worker_task, MIN_NICE);
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)

	struct file *file, *f;
	struct address_space *mapping;
	unsigned lo_blocksize;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	if (lo->lo_state != Lo_unbound)

	/* Avoid recursion */
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
		f = l->lo_backing_file;

	mapping = file->f_mapping;
	inode = mapping->host;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo_flags |= LO_FLAGS_READ_ONLY;

	lo_blocksize = S_ISBLK(inode->i_mode) ?
		inode->i_bdev->bd_block_size : PAGE_SIZE;

	size = get_loop_size(lo, file);
	if ((loff_t)(sector_t)size != size)

	error = loop_prepare_queue(lo);

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);

	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_state = Lo_bound;
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
	 */

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
loop_release_xfer(struct loop_device *lo)

	struct loop_func_table *xfer = lo->lo_encryption;

		err = xfer->release(lo);

		lo->lo_encryption = NULL;
		module_put(xfer->owner);

loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)

		struct module *owner = xfer->owner;

		if (!try_module_get(owner))

		err = xfer->init(lo, i);

			lo->lo_encryption = xfer;
static int loop_clr_fd(struct loop_device *lo)

	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;

	if (lo->lo_state != Lo_bound)

	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown when
	 * the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid on
	 * the loopback device whenever they appear. xfstests is notorious for
	 * failing tests because blkid via udev races with a losetup
	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_ctl_mutex);

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);

	invalidate_bdev(bdev);

	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	bd_set_size(bdev, 0);
	/* let user-space know about this change */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
		loop_reread_partitions(lo, bdev);
	lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	loop_unprepare_queue(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

	struct loop_func_table *xfer;
	kuid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN))
	if (lo->lo_state != Lo_bound)
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)

	err = loop_release_xfer(lo);

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
		xfer = xfer_funcs[type];

	err = loop_init_xfer(lo, xfer, info);

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit)
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))

	loop_config_discard(lo);

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		loop_reread_partitions(lo, lo->lo_device);

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);
loop_get_status(struct loop_device *lo, struct loop_info64 *info)

	struct file *file = lo->lo_backing_file;

	if (lo->lo_state != Lo_bound)
	error = vfs_getattr(&file->f_path, &stat);

	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);

loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)

	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)

	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);

loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)

	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
	return loop_set_status(lo, &info64);

loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;

	err = loop_get_status(lo, &info64);
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))

loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;

	err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))

static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)

	if (unlikely(lo->lo_state != Lo_bound))

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);

static int loop_set_dio(struct loop_device *lo, unsigned long arg)

	if (lo->lo_state != Lo_bound)

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)

	struct loop_device *lo = bdev->bd_disk->private_data;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
		err = loop_set_fd(lo, mode, bdev, arg);
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
	case LOOP_SET_STATUS:
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
	case LOOP_SET_STATUS64:
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
	case LOOP_SET_CAPACITY:
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo, bdev);
	case LOOP_SET_DIRECT_IO:
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_dio(lo, arg);
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	mutex_unlock(&lo->lo_ctl_mutex);
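/*
 * Added note, illustrative only (userspace sketch, error handling omitted):
 * binding and unbinding a backing file goes through these ioctls, e.g.
 *
 *	int loopfd = open("/dev/loop0", O_RDWR);
 *	int filefd = open("disk.img", O_RDWR);
 *	ioctl(loopfd, LOOP_SET_FD, filefd);	// attach backing file
 *	...
 *	ioctl(loopfd, LOOP_CLR_FD, 0);		// detach again
 *
 * losetup(8) is a thin wrapper around exactly these calls.
 */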
#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;	/* ioctl r/o */
	compat_dev_t	lo_device;	/* ioctl r/o */
	compat_ulong_t	lo_inode;	/* ioctl r/o */
	compat_dev_t	lo_rdevice;	/* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;	/* ioctl w/o */
	compat_int_t	lo_flags;	/* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE];	/* ioctl w/o */
	compat_ulong_t	lo_init[2];

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)

	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)

	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])

	if (copy_to_user(arg, &info, sizeof(info)))

loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)

	struct loop_info64 info64;

	ret = loop_info64_from_compat(arg, &info64);
	return loop_set_status(lo, &info64);

loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)

	struct loop_info64 info64;

	err = loop_get_status(lo, &info64);
		err = loop_info64_to_compat(&info64, arg);
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)

	struct loop_device *lo = bdev->bd_disk->private_data;

	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
	case LOOP_SET_CAPACITY:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
static int lo_open(struct block_device *bdev, fmode_t mode)

	struct loop_device *lo;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;

	atomic_inc(&lo->lo_refcnt);

	mutex_unlock(&loop_index_mutex);

static void lo_release(struct gendisk *disk, fmode_t mode)

	struct loop_device *lo = disk->private_data;

	if (atomic_dec_return(&lo->lo_refcnt))

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo);
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
	mutex_unlock(&lo->lo_ctl_mutex);

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.release =	lo_release,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
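/*
 * Added note, illustrative only: both parameters are read-only after load,
 * so they are normally set when the module is inserted (or on the kernel
 * command line when the driver is built in), e.g.
 *
 *	modprobe loop max_loop=8 max_part=15
 *
 * which pre-creates /dev/loop0../dev/loop7 and reserves minor space for up
 * to 15 partitions per device.
 */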
int loop_register_transfer(struct loop_func_table *funcs)

	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
	xfer_funcs[n] = funcs;

static int unregister_transfer_cb(int id, void *ptr, void *data)

	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);

int loop_unregister_transfer(int number)

	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)

	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct loop_device *lo = cmd->rq->q->queuedata;

	blk_mq_start_request(bd->rq);

	if (lo->lo_state != Lo_bound)
		return BLK_MQ_RQ_QUEUE_ERROR;

	switch (req_op(cmd->rq)) {
	case REQ_OP_DISCARD:
		cmd->use_aio = false;
		cmd->use_aio = lo->use_dio;

	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_MQ_RQ_QUEUE_OK;
static void loop_handle_cmd(struct loop_cmd *cmd)

	const bool write = op_is_write(req_op(cmd->rq));
	struct loop_device *lo = cmd->rq->q->queuedata;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {

	ret = do_req_filebacked(lo, cmd->rq);

	/* complete non-aio request */
	if (!cmd->use_aio || ret)
		blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);

static void loop_queue_work(struct kthread_work *work)

	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);

static int loop_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)

	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kthread_init_work(&cmd->work, loop_queue_work);

static struct blk_mq_ops loop_mq_ops = {
	.queue_rq	= loop_queue_rq,
	.init_request	= loop_init_request,
static int loop_add(struct loop_device **l, int i)

	struct loop_device *lo;
	struct gendisk *disk;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);

	lo->lo_state = Lo_unbound;

	/* allocate id, if @id >= 0, we're requesting that specific id */
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);

	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR_OR_NULL(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;

	lo->lo_queue->queuedata = lo;

	/*
	 * It doesn't make sense to enable merge because the I/O
	 * submitted to backing file is handled page by page.
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);

	disk = lo->lo_disk = alloc_disk(1 << part_shift);
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minor space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static, inflexible and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
	disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	mutex_init(&lo->lo_ctl_mutex);
	atomic_set(&lo->lo_refcnt, 0);
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);

	return lo->lo_number;

	blk_cleanup_queue(lo->lo_queue);
	blk_mq_free_tag_set(&lo->tag_set);
	idr_remove(&loop_index_idr, i);
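/*
 * Added note, illustrative only: with max_part=15 (so part_shift = fls(15)
 * = 4) the device created as loop3 gets first_minor 3 << 4 = 48; minors
 * 49..63 are reserved for its partitions (loop3p1..loop3p15), and any
 * further partitions fall back to the extended minor space enabled by
 * GENHD_FL_EXT_DEVT above.
 */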
static void loop_remove(struct loop_device *lo)

	blk_cleanup_queue(lo->lo_queue);
	del_gendisk(lo->lo_disk);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);

static int find_free_cb(int id, void *ptr, void *data)

	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {

static int loop_lookup(struct loop_device **l, int i)

	struct loop_device *lo;

		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
			ret = lo->lo_number;

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
		ret = lo->lo_number;

static struct kobject *loop_probe(dev_t dev, int *part, void *data)

	struct loop_device *lo;
	struct kobject *kobj;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
		err = loop_add(&lo, MINOR(dev) >> part_shift);

	kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);
static long loop_control_ioctl(struct file *file, unsigned int cmd,

	struct loop_device *lo;

	mutex_lock(&loop_index_mutex);
		ret = loop_lookup(&lo, parm);
		ret = loop_add(&lo, parm);
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			mutex_unlock(&lo->lo_ctl_mutex);
		if (atomic_read(&lo->lo_refcnt) > 0) {
			mutex_unlock(&lo->lo_ctl_mutex);
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		ret = loop_add(&lo, -1);
	mutex_unlock(&loop_index_mutex);

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");
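/*
 * Added note, illustrative only (userspace sketch, error handling omitted):
 * the control node is the preferred way to allocate and free devices:
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr  = ioctl(ctl, LOOP_CTL_GET_FREE);	// e.g. 0 for loop0
 *	ioctl(ctl, LOOP_CTL_ADD, 7);			// create /dev/loop7
 *	ioctl(ctl, LOOP_CTL_REMOVE, 7);			// delete it again
 */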
static int __init loop_init(void)

	unsigned long range;
	struct loop_device *lo;

	err = misc_register(&loop_misc);

		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that the user can decide the correct minor
		 * number if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;

	if ((1UL << part_shift) > DISK_MAX_PARTS) {

	if (max_loop > 1UL << (MINORBITS - part_shift)) {

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
		range = max_loop << part_shift;
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;

	if (register_blkdev(LOOP_MAJOR, "loop")) {

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");

	misc_deregister(&loop_misc);
static int loop_exit_cb(int id, void *ptr, void *data)

	struct loop_device *lo = ptr;

static void __exit loop_exit(void)

	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);

module_init(loop_init);
module_exit(loop_exit);

static int __init max_loop_setup(char *str)

	max_loop = simple_strtol(str, NULL, 0);

__setup("max_loop=", max_loop_setup);