}
EXPORT_SYMBOL_GPL(dm_cell_release);
-/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again. In these situations we know that no other
- * bio may be in the cell. This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- BUG_ON(cell->holder != bio);
- BUG_ON(!bio_list_empty(&cell->bios));
-
- __cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
-
- spin_lock_irqsave(&prison->lock, flags);
- __cell_release_singleton(cell, bio);
- spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
/*
* Sometimes we don't want the holder, just the additional bios.
*/
}
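/*
 * The body of dm_cell_release_no_holder() is elided by this hunk. A
 * minimal sketch of how such a helper plausibly reads, assuming the
 * bio-prison API of this series (the __cell_release_no_holder() name
 * and the exact locking are assumptions, not taken from this diff):
 *
 *	static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 *					     struct bio_list *inmates)
 *	{
 *		hlist_del(&cell->list);
 *		bio_list_merge(inmates, &cell->bios);
 *	}
 *
 *	void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
 *				       struct bio_list *inmates)
 *	{
 *		unsigned long flags;
 *		struct dm_bio_prison *prison = cell->prison;
 *
 *		spin_lock_irqsave(&prison->lock, flags);
 *		__cell_release_no_holder(cell, inmates);
 *		spin_unlock_irqrestore(&prison->lock, flags);
 *	}
 */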
/*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
*/
static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
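/*
 * (body elided by this hunk; a plausible sketch, assuming the pool
 * keeps a deferred_bios list that is drained by a worker thread; the
 * wake_worker() call and the locking here are assumptions:
 *
 *	struct pool *pool = tc->pool;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&pool->lock, flags);
 *	dm_cell_release_no_holder(cell, &pool->deferred_bios);
 *	spin_unlock_irqrestore(&pool->lock, flags);
 *
 *	wake_worker(pool);
 * )
 */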
*/
build_data_key(tc->td, lookup_result.block, &key2);
if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
break;
}
* a block boundary. So we submit the discard of a
* partial block appropriately.
*/
- dm_cell_release_singleton(cell, bio);
- dm_cell_release_singleton(cell2, bio);
+ cell_defer_except(tc, cell);
+ cell_defer_except(tc, cell2);
if ((!lookup_result.shared) && pool->pf.discard_passdown)
remap_and_issue(tc, bio, lookup_result.block);
else
bio_endio(bio, 0);
}
break;

case -ENODATA:
/*
* It isn't provisioned, just forget it.
*/
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
bio_endio(bio, 0);
break;
default:
DMERR("discard: find block unexpectedly returned %d", r);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
bio_io_error(bio);
break;
}
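/*
 * Each exit in the discard path above now follows the same two-step
 * pattern, sketched here:
 *
 *	cell_defer_except(tc, cell);	<- waiters go back to the pool's
 *					   deferred list (an assumption on
 *					   where they are queued)
 *	bio_io_error(bio);		<- the holder bio is completed in
 *					   place
 *
 * so the old singleton sanity check (the BUG_ON() that no other bio is
 * in the cell) is no longer relied on at these call sites.
 */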
h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
remap_and_issue(tc, bio, lookup_result->block);
}
}
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
remap_and_issue(tc, bio, 0);
return;
}
*/
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
bio_endio(bio, 0);
return;
}
* TODO: this will probably have to change when discard goes
* back in.
*/
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
if (lookup_result.shared)
process_shared_bio(tc, bio, block, &lookup_result);
else
remap_and_issue(tc, bio, lookup_result.block);
break;
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
remap_to_origin_and_issue(tc, bio);
} else
provision_block(tc, bio, block, cell);
break;
default:
DMERR("dm_thin_find_block() failed, error = %d", r);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_except(tc, cell);
bio_io_error(bio);
break;
}