// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 */
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"
24 struct rb_root badblocklist
;
25 unsigned long long badblock_count
;
28 unsigned int sect_per_block
;
30 bool fail_read_on_bb
:1;
34 static struct badblock
*dust_rb_search(struct rb_root
*root
, sector_t blk
)
36 struct rb_node
*node
= root
->rb_node
;
39 struct badblock
*bblk
= rb_entry(node
, struct badblock
, node
);
43 else if (bblk
->bb
< blk
)
44 node
= node
->rb_right
;
52 static bool dust_rb_insert(struct rb_root
*root
, struct badblock
*new)
54 struct badblock
*bblk
;
55 struct rb_node
**link
= &root
->rb_node
, *parent
= NULL
;
56 sector_t value
= new->bb
;
60 bblk
= rb_entry(parent
, struct badblock
, node
);
63 link
= &(*link
)->rb_left
;
64 else if (bblk
->bb
< value
)
65 link
= &(*link
)->rb_right
;
70 rb_link_node(&new->node
, parent
, link
);
71 rb_insert_color(&new->node
, root
);
76 static int dust_remove_block(struct dust_device
*dd
, unsigned long long block
)
78 struct badblock
*bblock
;
81 spin_lock_irqsave(&dd
->dust_lock
, flags
);
82 bblock
= dust_rb_search(&dd
->badblocklist
, block
* dd
->sect_per_block
);
85 if (!dd
->quiet_mode
) {
86 DMERR("%s: block %llu not found in badblocklist",
89 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
93 rb_erase(&bblock
->node
, &dd
->badblocklist
);
96 DMINFO("%s: badblock removed at block %llu", __func__
, block
);
98 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
103 static int dust_add_block(struct dust_device
*dd
, unsigned long long block
)
105 struct badblock
*bblock
;
108 bblock
= kmalloc(sizeof(*bblock
), GFP_KERNEL
);
109 if (bblock
== NULL
) {
111 DMERR("%s: badblock allocation failed", __func__
);
115 spin_lock_irqsave(&dd
->dust_lock
, flags
);
116 bblock
->bb
= block
* dd
->sect_per_block
;
117 if (!dust_rb_insert(&dd
->badblocklist
, bblock
)) {
118 if (!dd
->quiet_mode
) {
119 DMERR("%s: block %llu already in badblocklist",
122 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
127 dd
->badblock_count
++;
129 DMINFO("%s: badblock added at block %llu", __func__
, block
);
130 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
135 static int dust_query_block(struct dust_device
*dd
, unsigned long long block
)
137 struct badblock
*bblock
;
140 spin_lock_irqsave(&dd
->dust_lock
, flags
);
141 bblock
= dust_rb_search(&dd
->badblocklist
, block
* dd
->sect_per_block
);
143 DMINFO("%s: block %llu found in badblocklist", __func__
, block
);
145 DMINFO("%s: block %llu not found in badblocklist", __func__
, block
);
146 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
151 static int __dust_map_read(struct dust_device
*dd
, sector_t thisblock
)
153 struct badblock
*bblk
= dust_rb_search(&dd
->badblocklist
, thisblock
);
156 return DM_MAPIO_KILL
;
158 return DM_MAPIO_REMAPPED
;
161 static int dust_map_read(struct dust_device
*dd
, sector_t thisblock
,
162 bool fail_read_on_bb
)
165 int ret
= DM_MAPIO_REMAPPED
;
167 if (fail_read_on_bb
) {
168 spin_lock_irqsave(&dd
->dust_lock
, flags
);
169 ret
= __dust_map_read(dd
, thisblock
);
170 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
176 static void __dust_map_write(struct dust_device
*dd
, sector_t thisblock
)
178 struct badblock
*bblk
= dust_rb_search(&dd
->badblocklist
, thisblock
);
181 rb_erase(&bblk
->node
, &dd
->badblocklist
);
182 dd
->badblock_count
--;
184 if (!dd
->quiet_mode
) {
185 sector_div(thisblock
, dd
->sect_per_block
);
186 DMINFO("block %llu removed from badblocklist by write",
187 (unsigned long long)thisblock
);
192 static int dust_map_write(struct dust_device
*dd
, sector_t thisblock
,
193 bool fail_read_on_bb
)
197 if (fail_read_on_bb
) {
198 spin_lock_irqsave(&dd
->dust_lock
, flags
);
199 __dust_map_write(dd
, thisblock
);
200 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
203 return DM_MAPIO_REMAPPED
;
206 static int dust_map(struct dm_target
*ti
, struct bio
*bio
)
208 struct dust_device
*dd
= ti
->private;
211 bio_set_dev(bio
, dd
->dev
->bdev
);
212 bio
->bi_iter
.bi_sector
= dd
->start
+ dm_target_offset(ti
, bio
->bi_iter
.bi_sector
);
214 if (bio_data_dir(bio
) == READ
)
215 ret
= dust_map_read(dd
, bio
->bi_iter
.bi_sector
, dd
->fail_read_on_bb
);
217 ret
= dust_map_write(dd
, bio
->bi_iter
.bi_sector
, dd
->fail_read_on_bb
);
222 static bool __dust_clear_badblocks(struct rb_root
*tree
,
223 unsigned long long count
)
225 struct rb_node
*node
= NULL
, *nnode
= NULL
;
227 nnode
= rb_first(tree
);
235 nnode
= rb_next(node
);
236 rb_erase(node
, tree
);
241 BUG_ON(tree
->rb_node
!= NULL
);
246 static int dust_clear_badblocks(struct dust_device
*dd
)
249 struct rb_root badblocklist
;
250 unsigned long long badblock_count
;
252 spin_lock_irqsave(&dd
->dust_lock
, flags
);
253 badblocklist
= dd
->badblocklist
;
254 badblock_count
= dd
->badblock_count
;
255 dd
->badblocklist
= RB_ROOT
;
256 dd
->badblock_count
= 0;
257 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
259 if (!__dust_clear_badblocks(&badblocklist
, badblock_count
))
260 DMINFO("%s: no badblocks found", __func__
);
262 DMINFO("%s: badblocks cleared", __func__
);
/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
276 static int dust_ctr(struct dm_target
*ti
, unsigned int argc
, char **argv
)
278 struct dust_device
*dd
;
279 unsigned long long tmp
;
282 unsigned int sect_per_block
;
283 sector_t DUST_MAX_BLKSZ_SECTORS
= 2097152;
284 sector_t max_block_sectors
= min(ti
->len
, DUST_MAX_BLKSZ_SECTORS
);
287 ti
->error
= "Invalid argument count";
291 if (kstrtouint(argv
[2], 10, &blksz
) || !blksz
) {
292 ti
->error
= "Invalid block size parameter";
297 ti
->error
= "Block size must be at least 512";
301 if (!is_power_of_2(blksz
)) {
302 ti
->error
= "Block size must be a power of 2";
306 if (to_sector(blksz
) > max_block_sectors
) {
307 ti
->error
= "Block size is too large";
311 sect_per_block
= (blksz
>> SECTOR_SHIFT
);
313 if (sscanf(argv
[1], "%llu%c", &tmp
, &dummy
) != 1 || tmp
!= (sector_t
)tmp
) {
314 ti
->error
= "Invalid device offset sector";
318 dd
= kzalloc(sizeof(struct dust_device
), GFP_KERNEL
);
320 ti
->error
= "Cannot allocate context";
324 if (dm_get_device(ti
, argv
[0], dm_table_get_mode(ti
->table
), &dd
->dev
)) {
325 ti
->error
= "Device lookup failed";
330 dd
->sect_per_block
= sect_per_block
;
335 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message.
338 dd
->fail_read_on_bb
= false;
341 * Initialize bad block list rbtree.
343 dd
->badblocklist
= RB_ROOT
;
344 dd
->badblock_count
= 0;
345 spin_lock_init(&dd
->dust_lock
);
347 dd
->quiet_mode
= false;
349 BUG_ON(dm_set_target_max_io_len(ti
, dd
->sect_per_block
) != 0);
351 ti
->num_discard_bios
= 1;
352 ti
->num_flush_bios
= 1;
358 static void dust_dtr(struct dm_target
*ti
)
360 struct dust_device
*dd
= ti
->private;
362 __dust_clear_badblocks(&dd
->badblocklist
, dd
->badblock_count
);
363 dm_put_device(ti
, dd
->dev
);
367 static int dust_message(struct dm_target
*ti
, unsigned int argc
, char **argv
,
368 char *result_buf
, unsigned int maxlen
)
370 struct dust_device
*dd
= ti
->private;
371 sector_t size
= i_size_read(dd
->dev
->bdev
->bd_inode
) >> SECTOR_SHIFT
;
372 bool invalid_msg
= false;
373 int result
= -EINVAL
;
374 unsigned long long tmp
, block
;
379 if (!strcasecmp(argv
[0], "addbadblock") ||
380 !strcasecmp(argv
[0], "removebadblock") ||
381 !strcasecmp(argv
[0], "queryblock")) {
382 DMERR("%s requires an additional argument", argv
[0]);
383 } else if (!strcasecmp(argv
[0], "disable")) {
384 DMINFO("disabling read failures on bad sectors");
385 dd
->fail_read_on_bb
= false;
387 } else if (!strcasecmp(argv
[0], "enable")) {
388 DMINFO("enabling read failures on bad sectors");
389 dd
->fail_read_on_bb
= true;
391 } else if (!strcasecmp(argv
[0], "countbadblocks")) {
392 spin_lock_irqsave(&dd
->dust_lock
, flags
);
393 DMINFO("countbadblocks: %llu badblock(s) found",
395 spin_unlock_irqrestore(&dd
->dust_lock
, flags
);
397 } else if (!strcasecmp(argv
[0], "clearbadblocks")) {
398 result
= dust_clear_badblocks(dd
);
399 } else if (!strcasecmp(argv
[0], "quiet")) {
401 dd
->quiet_mode
= true;
403 dd
->quiet_mode
= false;
408 } else if (argc
== 2) {
409 if (sscanf(argv
[1], "%llu%c", &tmp
, &dummy
) != 1)
413 sector_div(size
, dd
->sect_per_block
);
415 DMERR("selected block value out of range");
419 if (!strcasecmp(argv
[0], "addbadblock"))
420 result
= dust_add_block(dd
, block
);
421 else if (!strcasecmp(argv
[0], "removebadblock"))
422 result
= dust_remove_block(dd
, block
);
423 else if (!strcasecmp(argv
[0], "queryblock"))
424 result
= dust_query_block(dd
, block
);
429 DMERR("invalid number of arguments '%d'", argc
);
432 DMERR("unrecognized message '%s' received", argv
[0]);
437 static void dust_status(struct dm_target
*ti
, status_type_t type
,
438 unsigned int status_flags
, char *result
, unsigned int maxlen
)
440 struct dust_device
*dd
= ti
->private;
444 case STATUSTYPE_INFO
:
445 DMEMIT("%s %s %s", dd
->dev
->name
,
446 dd
->fail_read_on_bb
? "fail_read_on_bad_block" : "bypass",
447 dd
->quiet_mode
? "quiet" : "verbose");
450 case STATUSTYPE_TABLE
:
451 DMEMIT("%s %llu %u", dd
->dev
->name
,
452 (unsigned long long)dd
->start
, dd
->blksz
);
457 static int dust_prepare_ioctl(struct dm_target
*ti
, struct block_device
**bdev
)
459 struct dust_device
*dd
= ti
->private;
460 struct dm_dev
*dev
= dd
->dev
;
465 * Only pass ioctls through if the device sizes match exactly.
468 ti
->len
!= i_size_read(dev
->bdev
->bd_inode
) >> SECTOR_SHIFT
)
474 static int dust_iterate_devices(struct dm_target
*ti
, iterate_devices_callout_fn fn
,
477 struct dust_device
*dd
= ti
->private;
479 return fn(ti
, dd
->dev
, dd
->start
, ti
->len
, data
);
482 static struct target_type dust_target
= {
484 .version
= {1, 0, 0},
485 .module
= THIS_MODULE
,
488 .iterate_devices
= dust_iterate_devices
,
490 .message
= dust_message
,
491 .status
= dust_status
,
492 .prepare_ioctl
= dust_prepare_ioctl
,
495 static int __init
dm_dust_init(void)
497 int result
= dm_register_target(&dust_target
);
500 DMERR("dm_register_target failed %d", result
);
505 static void __exit
dm_dust_exit(void)
507 dm_unregister_target(&dust_target
);
510 module_init(dm_dust_init
);
511 module_exit(dm_dust_exit
);
513 MODULE_DESCRIPTION(DM_NAME
" dust test target");
514 MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
515 MODULE_LICENSE("GPL");