__bio_clone_fast(clone, bio);
- if (bio_integrity(bio)) {
- int r = bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (unlikely(bio_integrity(bio) != NULL)) {
+ int r;
+
+ if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
+ !dm_target_passes_integrity(tio->ti->type))) {
+ DMWARN("%s: the target %s doesn't support integrity data.",
+ dm_device_name(tio->io->md),
+ tio->ti->type->name);
+ return -EIO;
+ }
+
+ r = bio_integrity_clone(clone, bio, GFP_NOIO);
if (r < 0)
return r;
}
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
- if (bio_integrity(bio))
+ if (unlikely(bio_integrity(bio) != NULL))
bio_integrity_trim(clone, 0, len);
return 0;
}
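For context, a minimal sketch (not part of this patch) of the other side of the new check: a pass-through target opts in via the DM_TARGET_PASSES_INTEGRITY feature flag, so clone_bio() clones the integrity payload instead of failing the bio with -EIO. The target name and the ctr/dtr/map stubs below are hypothetical; the flag, the .features field, and DM_MAPIO_REMAPPED are the real interfaces. In mainline, e.g. dm-linear advertises DM_TARGET_PASSES_INTEGRITY, while dm-integrity (which generates the metadata itself) sets DM_TARGET_INTEGRITY.

#include <linux/module.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Hypothetical constructor/destructor/map stubs with the signatures
 * struct target_type expects; a real target would do actual work here. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	return DM_MAPIO_REMAPPED;
}

static struct target_type example_passthrough_target = {
	.name     = "example-passthrough",	/* hypothetical name */
	.version  = {1, 0, 0},
	/* Integrity metadata flows through this target unmodified, so
	 * the dm_target_passes_integrity() test in clone_bio() succeeds. */
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module   = THIS_MODULE,
	.ctr      = example_ctr,
	.dtr      = example_dtr,
	.map      = example_map,
};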
static void __set_size(struct mapped_device *md, sector_t size)
{
+ lockdep_assert_held(&md->suspend_lock);
+
set_capacity(md->disk, size);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}
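The new assertion only documents an existing rule at runtime (it compiles away unless CONFIG_PROVE_LOCKING is set). A minimal sketch of the calling convention it checks, with a hypothetical caller standing in for the real path (dm_swap_table() takes md->suspend_lock before __bind(), which calls __set_size()):

/* Hypothetical caller, for illustration only. */
static void example_resize(struct mapped_device *md, sector_t new_size)
{
	mutex_lock(&md->suspend_lock);
	__set_size(md, new_size);	/* lockdep_assert_held() is satisfied */
	mutex_unlock(&md->suspend_lock);

	/* Calling __set_size() without the lock would now produce a
	 * lockdep splat instead of a silent size/suspend race. */
}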
-void dm_set_md_type(struct mapped_device *md, unsigned type)
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
-unsigned dm_get_md_type(struct mapped_device *md)
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
return md->type;
}
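For reference, the enum that replaces the bare unsigned in these signatures, as added to include/linux/device-mapper.h by this cleanup; the values match the old DM_TYPE_* defines, so the change is type-safety only:

enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};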
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
- unsigned type = dm_get_md_type(md);
+ enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		if (type == DM_TYPE_DAX_BIO_BASED)
			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
		break;
+ case DM_TYPE_NONE:
+ WARN_ON_ONCE(true);
+ break;
}
	return 0;
}
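One practical payoff of the typed switch: with an enum operand and no default arm, gcc's -Wswitch (enabled by -Wall) warns about any enumerator that lacks a case, which is why DM_TYPE_NONE gets an explicit WARN_ON_ONCE arm here (the request-based arms are elided from this excerpt). A standalone userspace illustration, using a local copy of the enumerators:

#include <stdio.h>

/* Local copy of the enumerators, for illustration only. */
enum dm_queue_mode {
	DM_TYPE_NONE,
	DM_TYPE_BIO_BASED,
	DM_TYPE_REQUEST_BASED,
	DM_TYPE_MQ_REQUEST_BASED,
	DM_TYPE_DAX_BIO_BASED,
};

static const char *queue_mode_name(enum dm_queue_mode mode)
{
	/* Delete any case below and compile with -Wall: gcc reports
	 * "enumeration value '...' not handled in switch". */
	switch (mode) {
	case DM_TYPE_NONE:		return "none";
	case DM_TYPE_BIO_BASED:		return "bio-based";
	case DM_TYPE_REQUEST_BASED:	return "request-based";
	case DM_TYPE_MQ_REQUEST_BASED:	return "mq request-based";
	case DM_TYPE_DAX_BIO_BASED:	return "dax bio-based";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", queue_mode_name(DM_TYPE_DAX_BIO_BASED));
	return 0;
}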
/*
 * If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
- *
- * Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	int r;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+ else
+ pr_debug("%s: suspending with flush\n", dm_device_name(md));
/*
 * This gets reverted if there's an error later and the targets
 * provide the .presuspend_undo hook.
 */
	dm_table_presuspend_targets(map);

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
+ lockdep_assert_held(&md->suspend_lock);
+
if (md->internal_suspend_count++)
return; /* nested internal suspend */
}
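The internal_suspend_count test is a classic nesting counter: only the outermost suspend/resume pair does real work, inner pairs just balance the counter. A standalone sketch of the idiom; the names and printf()s are ours, the kernel counterparts are __dm_internal_suspend() and __dm_internal_resume():

#include <stdio.h>

/* A single global stands in for md->internal_suspend_count. */
static unsigned int suspend_count;

static void internal_suspend(void)
{
	if (suspend_count++)
		return;	/* nested: device is already suspended */
	printf("outermost suspend: quiescing device\n");
}

static void internal_resume(void)
{
	if (--suspend_count)
		return;	/* still nested: stay suspended */
	printf("outermost resume: restarting device\n");
}

int main(void)
{
	internal_suspend();	/* does the real work */
	internal_suspend();	/* nested no-op */
	internal_resume();	/* still suspended */
	internal_resume();	/* real resume */
	return 0;
}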
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
unsigned integrity, unsigned per_io_data_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);