/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
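
/*
 * Scheduler timeout callback: the job failed to signal its fence in time.
 * Log the last signaled and last emitted sequence numbers of the ring and
 * trigger a GPU reset (via the SR-IOV path when running as a VF).
 */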
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	if (amdgpu_sriov_vf(job->adev))
		amdgpu_sriov_gpu_reset(job->adev, job);
	else
		amdgpu_gpu_reset(job->adev);
}
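
/*
 * Allocate a job together with storage for num_ibs IBs in a single
 * allocation and initialize its sync objects.  The caller owns the job
 * until it is pushed to the scheduler with amdgpu_job_submit() or released
 * with amdgpu_job_free().
 */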
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->vm = vm;
	/* the IB array lives directly behind the job structure */
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->dep_sync);
	amdgpu_sync_create(&(*job)->sched_sync);

	return 0;
}
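
/*
 * Convenience wrapper: allocate a VM-less job with a single IB of the given
 * size; vm_pd_addr points at the GART table, as used for kernel submissions
 * without a VM.
 */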
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
	else
		(*job)->vm_pd_addr = adev->gart.table_addr;

	return r;
}
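
/*
 * Free the IBs of a job.  The scheduler's finished fence is preferred over
 * the hardware fence, so the IB memory is only reused once the scheduler
 * considers the job done.
 */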
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
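
/*
 * Scheduler free_job callback: drop the hardware fence and the sync
 * objects, then free the job itself once the scheduler is done with it.
 */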
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}
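
/*
 * Free a job that was never pushed to the scheduler, e.g. on an error path
 * before amdgpu_job_submit(); this also releases its IBs.
 */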
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}
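
/*
 * Hand the job over to the GPU scheduler on the given ring.  On success
 * ownership moves to the scheduler and *f holds a reference to the
 * scheduler's finished fence for the caller to wait on.
 */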
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f)
{
	int r;

	job->ring = ring;

	if (!f)
		return -EINVAL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	job->fence_ctx = entity->fence_context;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}
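
/*
 * Scheduler dependency callback: return the next fence the job still has to
 * wait for, first from dep_sync, then from the sync object, and grab a VM ID
 * once no other dependency is pending.
 */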
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;

	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
	int r;

	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence to sync (%d)\n", r);
	}
	if (!fence)
		fence = amdgpu_sync_get_fence(&job->sync);
	while (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished,
				      job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
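
/*
 * Scheduler run_job callback: submit the job's IBs to the ring.  Scheduling
 * is skipped if the submitting client's VRAM contents were lost.  The
 * resulting hardware fence is also stashed in job->fence so it can be
 * replaced after a GPU reset.
 */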
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_job *job;
	struct amdgpu_fpriv *fpriv = NULL;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);
	if (job->vm)
		fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
	/* skip ib schedule when vram is lost */
	if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
		DRM_ERROR("Skip scheduling IBs!\n");
	else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}
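
/* Backend operations the amdgpu driver exposes to the GPU scheduler. */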
const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};