struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error = 0;
dmu_tx_t *tx;
rl_t *rl;
- /*
- * Annotate this call path with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- ASSERT(!(current->flags & PF_NOFS));
- current->flags |= PF_NOFS;
-
if (req->cmd_flags & VDEV_REQ_FLUSH)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/*
 * Some requests are just for flush and nothing else.
 */
if (size == 0) {
- blk_end_request(req, 0, size);
+ error = 0;
goto out;
}
if (error) {
dmu_tx_abort(tx);
zfs_range_unlock(rl);
- blk_end_request(req, -error, size);
goto out;
}
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
- blk_end_request(req, -error, size);
out:
- current->flags &= ~PF_NOFS;
+ blk_end_request(req, -error, size);
+ spl_fstrans_unmark(cookie);
}
#ifdef HAVE_BLK_QUEUE_DISCARD
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t start = blk_rq_pos(req) << 9;
uint64_t end = start + blk_rq_bytes(req);
int error;
rl_t *rl;
- /*
- * Annotate this call path with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- ASSERT(!(current->flags & PF_NOFS));
- current->flags |= PF_NOFS;
-
if (end > zv->zv_volsize) {
- blk_end_request(req, -EIO, blk_rq_bytes(req));
+ error = EIO;
goto out;
}
end = P2ALIGN(end, zv->zv_volblocksize);
if (start >= end) {
- blk_end_request(req, 0, blk_rq_bytes(req));
+ error = 0;
goto out;
}
*/
zfs_range_unlock(rl);
-
- blk_end_request(req, -error, blk_rq_bytes(req));
out:
- current->flags &= ~PF_NOFS;
+ blk_end_request(req, -error, blk_rq_bytes(req));
+ spl_fstrans_unmark(cookie);
}
#endif /* HAVE_BLK_QUEUE_DISCARD */
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error;
rl_t *rl;
if (size == 0) {
- blk_end_request(req, 0, size);
- return;
+ error = 0;
+ goto out;
}
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
if (error == ECKSUM)
error = SET_ERROR(EIO);
+out:
blk_end_request(req, -error, size);
+ spl_fstrans_unmark(cookie);
}
/*
ASSERT(zio != NULL);
ASSERT(size != 0);
- zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
+ zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_zilog = zv->zv_zilog;
zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
return (-SET_ERROR(ERESTARTSYS));
}
+ error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
+ if (error)
+ goto out_mutex;
+
/* lie and say we're read-only */
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
if (error)
zv->zv_volsize = volsize;
zv->zv_zilog = zil_open(os, zvol_get_data);
- VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
set_disk_ro(zv->zv_disk, 1);
drop_mutex = 1;
}
- ASSERT3P(zv, !=, NULL);
- ASSERT3U(zv->zv_open_count, >, 0);
- zv->zv_open_count--;
- if (zv->zv_open_count == 0)
- zvol_last_close(zv);
+ if (zv->zv_open_count > 0) {
+ zv->zv_open_count--;
+ if (zv->zv_open_count == 0)
+ zvol_last_close(zv);
+ }
if (drop_mutex)
mutex_exit(&zvol_state_lock);
zvol_state_t *zv;
int error = 0;
- zv = kmem_zalloc(sizeof (zvol_state_t), KM_PUSHPAGE);
+ zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
spin_lock_init(&zv->zv_lock);
list_link_init(&zv->zv_next);
char *atp;
int error = 0;
- parent = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
+ parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
(void) strlcpy(parent, name, MAXPATHLEN);
if ((atp = strrchr(parent, '@')) != NULL) {
goto out;
}
- doi = kmem_alloc(sizeof (dmu_object_info_t), KM_PUSHPAGE);
+ doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
if (error)
set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
- blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
+ blk_queue_max_hw_sectors(zv->zv_queue, DMU_MAX_ACCESS / 512);
blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
oldnamelen = strlen(oldname);
newnamelen = strlen(newname);
- name = kmem_alloc(MAXNAMELEN, KM_PUSHPAGE);
+ name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
mutex_enter(&zvol_state_lock);