/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/kthread.h>
25 #include <linux/wait.h>
26 #include <linux/sched.h>
29 #include "amdgpu_trace.h"
31 int amdgpu_job_alloc(struct amdgpu_device
*adev
, unsigned num_ibs
,
32 struct amdgpu_job
**job
)
34 size_t size
= sizeof(struct amdgpu_job
);
39 size
+= sizeof(struct amdgpu_ib
) * num_ibs
;
41 *job
= kzalloc(size
, GFP_KERNEL
);
46 (*job
)->ibs
= (void *)&(*job
)[1];
47 (*job
)->num_ibs
= num_ibs
;
49 amdgpu_sync_create(&(*job
)->sync
);
54 int amdgpu_job_alloc_with_ib(struct amdgpu_device
*adev
, unsigned size
,
55 struct amdgpu_job
**job
)
59 r
= amdgpu_job_alloc(adev
, 1, job
);
63 r
= amdgpu_ib_get(adev
, NULL
, size
, &(*job
)->ibs
[0]);
70 void amdgpu_job_free(struct amdgpu_job
*job
)
74 /* use sched fence if available */
75 f
= (job
->base
.s_fence
)? &job
->base
.s_fence
->base
: job
->fence
;
77 for (i
= 0; i
< job
->num_ibs
; ++i
)
78 amdgpu_sa_bo_free(job
->adev
, &job
->ibs
[i
].sa_bo
, f
);
79 fence_put(job
->fence
);
81 amdgpu_bo_unref(&job
->uf
.bo
);
82 amdgpu_sync_free(&job
->sync
);
86 int amdgpu_job_submit(struct amdgpu_job
*job
, struct amdgpu_ring
*ring
,
87 struct amd_sched_entity
*entity
, void *owner
,
97 r
= amd_sched_job_init(&job
->base
, &ring
->sched
, entity
, owner
, &fence
);
102 *f
= fence_get(fence
);
103 amd_sched_entity_push_job(&job
->base
);
108 static struct fence
*amdgpu_job_dependency(struct amd_sched_job
*sched_job
)
110 struct amdgpu_job
*job
= to_amdgpu_job(sched_job
);
111 struct amdgpu_vm
*vm
= job
->ibs
->vm
;
113 struct fence
*fence
= amdgpu_sync_get_fence(&job
->sync
);
115 if (fence
== NULL
&& vm
&& !job
->ibs
->vm_id
) {
116 struct amdgpu_ring
*ring
= job
->ring
;
121 r
= amdgpu_vm_grab_id(vm
, ring
, &job
->sync
,
122 &job
->base
.s_fence
->base
,
123 &vm_id
, &vm_pd_addr
);
125 DRM_ERROR("Error getting VM ID (%d)\n", r
);
127 for (i
= 0; i
< job
->num_ibs
; ++i
) {
128 job
->ibs
[i
].vm_id
= vm_id
;
129 job
->ibs
[i
].vm_pd_addr
= vm_pd_addr
;
133 fence
= amdgpu_sync_get_fence(&job
->sync
);
139 static struct fence
*amdgpu_job_run(struct amd_sched_job
*sched_job
)
141 struct fence
*fence
= NULL
;
142 struct amdgpu_job
*job
;
146 DRM_ERROR("job is null\n");
149 job
= to_amdgpu_job(sched_job
);
151 r
= amdgpu_sync_wait(&job
->sync
);
153 DRM_ERROR("failed to sync wait (%d)\n", r
);
157 trace_amdgpu_sched_run_job(job
);
158 r
= amdgpu_ib_schedule(job
->ring
, job
->num_ibs
, job
->ibs
,
159 job
->sync
.last_vm_update
, &fence
);
161 DRM_ERROR("Error scheduling IBs (%d)\n", r
);
167 amdgpu_job_free(job
);
171 struct amd_sched_backend_ops amdgpu_sched_ops
= {
172 .dependency
= amdgpu_job_dependency
,
173 .run_job
= amdgpu_job_run
,