* by KVM (and thus needs a kernel virtual mapping).
* @gpa: guest physical address to map.
* @len: sanity check; the range being accessed must fit within a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
*/
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
- gpa_t gpa, unsigned long len, bool dirty);
+ gpa_t gpa, unsigned long len);
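
For call sites, the visible change is simply dropping the trailing dirty
argument. A minimal before/after sketch of a caller; the gpc pointer and
gpa value here are illustrative, not taken from this patch:

	/* Before this patch: */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
					gpa, PAGE_SIZE, false);

	/* After this patch: */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
					gpa, PAGE_SIZE);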
/**
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
* @gpc: struct gfn_to_pfn_cache object.
* @gpa: current guest physical address to map.
* @len: sanity check; the range being accessed must fit within a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: %true if the cache is still valid and the address matches.
* %false if the cache is not valid.
* @gpc: struct gfn_to_pfn_cache object.
* @gpa: updated guest physical address to map.
* @len: sanity check; the range being accessed must fit within a single page.
- * @dirty: mark the cache dirty immediately.
*
* @return: 0 for success.
* -EINVAL for a mapping which would cross a page boundary.
* with the lock still held to permit access.
*/
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len, bool dirty);
+ gpa_t gpa, unsigned long len);
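
Because a successful refresh does not return with the lock held, users of
the cache follow a check/refresh loop before touching the mapping. A rough
sketch of that pattern under the new signatures; the PAGE_SIZE length and
the write through gpc->khva are illustrative only:

	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Refresh acquires and releases gpc->lock internally. */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* The kernel mapping is safe to use while the lock is held. */
	memset(gpc->khva, 0, PAGE_SIZE);

	read_unlock_irqrestore(&gpc->lock, flags);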
/**
* kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
*
- * This unmaps the referenced page and marks it dirty, if appropriate. The
- * cache is left in the invalid state but at least the mapping from GPA to
- * userspace HVA will remain cached and can be reused on a subsequent
- * refresh.
+ * This unmaps the referenced page. The cache is left in the invalid state
+ * but at least the mapping from GPA to userspace HVA will remain cached
+ * and can be reused on a subsequent refresh.
*/
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
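
With the dirty handling removed from unmap, marking the page dirty becomes
the caller's responsibility at the point of the write. A sketch of what that
might look like, assuming the write happened while gpc->lock was held so the
memslot cached in gpc could not change underneath us:

	/* Caller-side dirty tracking, done under gpc->lock after writing
	 * through gpc->khva. */
	mark_page_dirty_in_slot(kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));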
}
__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
}
-
- /*
- * We cannot call mark_page_dirty() from here because
- * this physical CPU might not have an active vCPU
- * with which to do the KVM dirty tracking.
- *
- * Neither is there any point in telling the kernel MM
- * that the underlying page is dirty. A vCPU in guest
- * mode might still be writing to it up to the point
- * where we wake them a few lines further down anyway.
- *
- * So all the dirty marking happens on the unmap.
- */
}
write_unlock_irq(&gpc->lock);
}
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
-static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
- gpa_t gpa, bool dirty)
+static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa)
{
/* Unmap the old page if it was mapped before, and release it */
if (!is_error_noslot_pfn(pfn)) {
#endif
}
- kvm_release_pfn(pfn, dirty);
- if (dirty)
- mark_page_dirty(kvm, gpa);
+ kvm_release_pfn(pfn, false);
}
}
}
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- gpa_t gpa, unsigned long len, bool dirty)
+ gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
unsigned long old_uhva;
gpa_t old_gpa;
void *old_khva;
- bool old_valid, old_dirty;
+ bool old_valid;
int ret = 0;
/*
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
old_valid = gpc->valid;
- old_dirty = gpc->dirty;
/* If the userspace HVA is invalid, refresh that first */
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
- gpc->dirty = false;
gpc->gpa = gpa;
gpc->generation = slots->generation;
gpc->memslot = __gfn_to_memslot(slots, gfn);
}
out:
- if (ret)
- gpc->dirty = false;
- else
- gpc->dirty = dirty;
-
write_unlock_irq(&gpc->lock);
- __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+ __release_gpc(kvm, old_pfn, old_khva, old_gpa);
return ret;
}
{
void *old_khva;
kvm_pfn_t old_pfn;
- bool old_dirty;
gpa_t old_gpa;
write_lock_irq(&gpc->lock);
gpc->valid = false;
old_khva = gpc->khva - offset_in_page(gpc->khva);
- old_dirty = gpc->dirty;
old_gpa = gpc->gpa;
old_pfn = gpc->pfn;
write_unlock_irq(&gpc->lock);
- __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
+ __release_gpc(kvm, old_pfn, old_khva, old_gpa);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
- gpa_t gpa, unsigned long len, bool dirty)
+ gpa_t gpa, unsigned long len)
{
WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
list_add(&gpc->list, &kvm->gpc_list);
spin_unlock(&kvm->gpc_lock);
}
- return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty);
+ return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
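
For completeness, a rough sketch of the full cache lifecycle under the new
signatures; kvm_gfn_to_pfn_cache_destroy() is part of the same API but is
not touched by this patch:

	/* One-time activation; performs the initial map via refresh. */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
					gpa, PAGE_SIZE);
	if (ret)
		return ret;

	/* ... check/refresh loop and accesses through gpc->khva ... */

	/* Drop the kernel mapping; the GPA->HVA translation stays cached. */
	kvm_gfn_to_pfn_cache_unmap(kvm, gpc);

	/* Final teardown; unlinks the cache from kvm->gpc_list. */
	kvm_gfn_to_pfn_cache_destroy(kvm, gpc);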