extern struct amd_sched_backend_ops amdgpu_sched_ops;
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job);
+void amdgpu_job_free(struct amdgpu_job *job);
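The hunk above adds only the declarations. One plausible implementation, consistent with how the callers below use job->adev, job->ibs, job->num_ibs and job->uf, is to carve the IB array out of the same allocation as the job itself. A sketch, not necessarily the exact code in the corresponding .c hunk:

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	/* Place the IB array directly behind the job struct. */
	size += num_ibs * sizeof(struct amdgpu_ib);

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	return 0;
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	unsigned i;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i]);

	amdgpu_bo_unref(&job->uf.bo);
	kfree(job);
}

With this layout a single kzalloc/kfree pair replaces the separate kcalloc of the IB array that the CS code removes below.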
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ibs,
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
- /* indirect buffers */
- uint32_t num_ibs;
- struct amdgpu_ib *ibs;
+ /* scheduler job object */
+ struct amdgpu_job *job;
/* buffer objects */
struct ww_acquire_ctx ticket;
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
uint32_t ib_idx, int idx)
{
- return p->ibs[ib_idx].ptr[idx];
+ return p->job->ibs[ib_idx].ptr[idx];
}
static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
uint32_t ib_idx, int idx,
uint32_t value)
{
- p->ibs[ib_idx].ptr[idx] = value;
+ p->job->ibs[ib_idx].ptr[idx] = value;
}
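For illustration only, a hypothetical caller of the two accessors; the IB index and dword offsets are made up rather than taken from any real packet format:

/* Hypothetical example: patch a 64-bit GPU address into dwords 2/3
 * of the first IB, going through the parser's job as above. */
static void example_patch_address(struct amdgpu_cs_parser *p, uint64_t addr)
{
	amdgpu_set_ib_value(p, 0, 2, lower_32_bits(addr));
	amdgpu_set_ib_value(p, 0, 3, upper_32_bits(addr));
}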
uint64_t *chunk_array_user;
uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned size;
+ unsigned size, num_ibs = 0;
int i;
int ret;
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- p->num_ibs++;
+ ++num_ibs;
break;
case AMDGPU_CHUNK_ID_FENCE:
}
}
- if (p->num_ibs == 0) {
- ret = -EINVAL;
+ ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+ if (ret)
goto free_all_kdata;
- }
-
- p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs) {
- ret = -ENOMEM;
- goto free_all_kdata;
- }
kfree(chunk_array);
return 0;
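Condensed, the resulting flow in the parser-init path looks like this (a sketch, with the chunk-copying code elided); note that amdgpu_job_alloc() now performs the num_ibs == 0 sanity check the old code did inline:

	unsigned num_ibs = 0;

	for (i = 0; i < p->nchunks; i++)
		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
			++num_ibs;

	/* -EINVAL for zero IBs, -ENOMEM on allocation failure */
	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
	if (ret)
		goto free_all_kdata;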
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
+ r = amdgpu_sync_resv(p->adev, &p->job->ibs[0].sync, resv, p->filp);
if (r)
return r;
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
+ if (parser->job)
+ amdgpu_job_free(parser->job);
amdgpu_bo_unref(&parser->uf.bo);
amdgpu_bo_unref(&parser->uf_entry.robj);
}
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+ r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, vm->page_directory_fence);
if (r)
return r;
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, f);
if (r)
return r;
}
}
- r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+ r = amdgpu_vm_clear_invalids(adev, vm, &p->job->ibs[0].sync);
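A hedged aside on why all three hunks target p->job->ibs[0].sync: in this era of the driver the scheduler pulls job dependencies from the first IB only, so every CS dependency (reservation objects, the page-directory fence, page-table updates, invalidated mappings) is funneled into it. A hypothetical helper, not part of the patch, captures the pattern the call sites share:

/* Hypothetical helper: funnel a dependency fence into the first IB,
 * which is where the scheduler looks for them. */
static int amdgpu_cs_sync_first_ib(struct amdgpu_cs_parser *p,
				   struct fence *f)
{
	return amdgpu_sync_fence(p->adev, &p->job->ibs[0].sync, f);
}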
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
int i, r;
/* Only for UVD/VCE VM emulation */
- for (i = 0; i < parser->num_ibs; i++) {
- ring = parser->ibs[i].ring;
+ for (i = 0; i < parser->job->num_ibs; i++) {
+ ring = parser->job->ibs[i].ring;
if (ring->funcs->parse_cs) {
r = amdgpu_ring_parse_cs(ring, parser, i);
if (r)
int i, j;
int r;
- for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
+ for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
struct amdgpu_cs_chunk *chunk;
struct amdgpu_ib *ib;
struct drm_amdgpu_cs_chunk_ib *chunk_ib;
struct amdgpu_ring *ring;
chunk = &parser->chunks[i];
- ib = &parser->ibs[j];
+ ib = &parser->job->ibs[j];
chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
struct amdgpu_bo *gds = parser->bo_list->gds_obj;
struct amdgpu_bo *gws = parser->bo_list->gws_obj;
struct amdgpu_bo *oa = parser->bo_list->oa_obj;
- struct amdgpu_ib *ib = &parser->ibs[0];
+ struct amdgpu_ib *ib = &parser->job->ibs[0];
if (gds) {
ib->gds_base = amdgpu_bo_gpu_offset(gds);
}
/* wrap the last IB with user fence */
if (parser->uf.bo) {
- struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
+ struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
/* UVD & VCE fw doesn't support user fences */
if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
int i, j, r;
/* Add dependencies to first IB */
- ib = &p->ibs[0];
+ ib = &p->job->ibs[0];
for (i = 0; i < p->nchunks; ++i) {
struct drm_amdgpu_cs_chunk_dep *deps;
struct amdgpu_cs_chunk *chunk;
static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
- int i;
- if (job->ibs)
- for (i = 0; i < job->num_ibs; i++)
- amdgpu_ib_free(job->adev, &job->ibs[i]);
- kfree(job->ibs);
- if (job->uf.bo)
- amdgpu_bo_unref(&job->uf.bo);
+ amdgpu_job_free(job);
return 0;
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
- struct amdgpu_ring * ring = p->ibs->ring;
+ struct amdgpu_ring *ring = p->job->ibs->ring;
struct amd_sched_fence *fence;
struct amdgpu_job *job;
- job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
+ job = p->job;
+ p->job = NULL;
job->base.sched = &ring->sched;
job->base.s_entity = &p->ctx->rings[ring->idx].entity;
job->owner = p->filp;
job->free_job = amdgpu_cs_free_job;
- job->ibs = p->ibs;
- job->num_ibs = p->num_ibs;
- p->ibs = NULL;
- p->num_ibs = 0;
-
if (job->ibs[job->num_ibs - 1].user) {
job->uf = p->uf;
job->ibs[job->num_ibs - 1].user = &job->uf;
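The handoff at the top of this hunk is the subtle part: clearing p->job transfers ownership of the job, and thus of its IBs and user-fence BO, from the parser to the scheduler path. A commented restatement of those two lines, with no new code beyond what the hunk already shows:

	/* Ownership handoff (sketch): from here on the scheduler's
	 * free_job callback, amdgpu_cs_free_job() above, frees the job
	 * via amdgpu_job_free(), so the parser must drop its pointer
	 * to keep amdgpu_cs_parser_fini() from freeing it again. */
	job = p->job;
	p->job = NULL;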
if (r)
goto out;
- for (i = 0; i < parser.num_ibs; i++)
+ for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
r = amdgpu_cs_ib_vm_chunk(adev, &parser);