static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len,
+ p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- vgpu->ring_scan_buffer[ring_id] = p;
- vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[ring_id] = p;
+ s->ring_scan_buffer_size[ring_id] = workload->rb_len;
}
- shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
clean_workloads(vgpu, ALL_ENGINES);
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- kfree(vgpu->ring_scan_buffer[i]);
- vgpu->ring_scan_buffer[i] = NULL;
- vgpu->ring_scan_buffer_size[i] = 0;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ kfree(s->ring_scan_buffer[i]);
+ s->ring_scan_buffer[i] = NULL;
+ s->ring_scan_buffer_size[i] = 0;
}
}
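
The hunks above boil down to one pattern: each engine keeps a scan buffer that is preallocated, grown on demand with krealloc() when a workload's ring is larger than the current reservation, and freed per engine on cleanup, all now reached through vgpu->submission. A minimal standalone sketch of the same grow-on-demand idiom, with userspace realloc() standing in for krealloc() and hypothetical names that are not taken from the patch:

#include <stdlib.h>

/*
 * Illustrative sketch only, not part of the patch: the grow-on-demand idiom
 * used by shadow_workload_ring_buffer() above. All names are hypothetical
 * and realloc() stands in for krealloc().
 */
struct scan_buf {
	void	*va;	/* current buffer, NULL until first use */
	size_t	size;	/* current capacity in bytes */
};

/* Grow buf to at least len bytes; on failure the old buffer stays valid. */
static int scan_buf_reserve(struct scan_buf *buf, size_t len)
{
	void *p;

	if (len <= buf->size)
		return 0;

	p = realloc(buf->va, len);	/* behaves like malloc() when va is NULL */
	if (!p)
		return -1;

	buf->va = p;
	buf->size = len;
	return 0;
}
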
#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
enum intel_engine_id i;
struct intel_engine_cs *engine;
/* each ring has a shadow ring buffer until vgpu destroyed */
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- vgpu->ring_scan_buffer[i] =
+ s->ring_scan_buffer[i] =
kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
- if (!vgpu->ring_scan_buffer[i]) {
+ if (!s->ring_scan_buffer[i]) {
gvt_vgpu_err("fail to alloc ring scan buffer\n");
goto out;
}
- vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+ s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
}
return 0;
out:
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (vgpu->ring_scan_buffer_size[i]) {
- kfree(vgpu->ring_scan_buffer[i]);
- vgpu->ring_scan_buffer[i] = NULL;
- vgpu->ring_scan_buffer_size[i] = 0;
+ if (s->ring_scan_buffer_size[i]) {
+ kfree(s->ring_scan_buffer[i]);
+ s->ring_scan_buffer[i] = NULL;
+ s->ring_scan_buffer_size[i] = 0;
}
}
return -ENOMEM;
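
Two side notes on the hunk above, neither of which is part of the patch. First, RESERVE_RING_BUFFER_SIZE is PAGE_SIZE/8, i.e. 512 bytes with the common 4 KiB page size, which is the "1/2K for each engine" mentioned in the struct comment below. Second, the out: path walks every engine and uses the per-engine size entry as an "allocated" flag, so a partially completed init is rolled back cleanly. A hedged sketch of that rollback idiom, with hypothetical names and userspace malloc()/free() in place of kmalloc()/kfree():

#include <stdlib.h>

/*
 * Illustrative sketch only, not part of the patch: allocate one buffer per
 * engine and, on the first failure, free everything allocated so far.
 * Assumes sizes[] is zero-initialized on entry, as the vGPU structures are.
 */
static int alloc_per_engine(void *bufs[], size_t sizes[], int nr, size_t len)
{
	int i;

	for (i = 0; i < nr; i++) {
		bufs[i] = malloc(len);
		if (!bufs[i])
			goto out;
		sizes[i] = len;
	}
	return 0;
out:
	for (i = 0; i < nr; i++) {
		if (sizes[i]) {	/* nonzero size marks a successful allocation */
			free(bufs[i]);
			bufs[i] = NULL;
			sizes[i] = 0;
		}
	}
	return -1;
}
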
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ /* 1/2K for each engine */
+ void *ring_scan_buffer[I915_NUM_ENGINES];
+ int ring_scan_buffer_size[I915_NUM_ENGINES];
};
struct intel_vgpu {
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_submission submission;
- /* 1/2K for each engine */
- void *ring_scan_buffer[I915_NUM_ENGINES];
- int ring_scan_buffer_size[I915_NUM_ENGINES];
-
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {