/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
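/*
 * Backend callbacks glueing amdgpu to the shared GPU scheduler
 * (amd_gpu_scheduler): prepare_job runs before a job is handed to the
 * hardware, run_job emits the job's IBs to the ring, and process_job is
 * invoked once the job's fence has signaled.
 */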
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
				    struct amd_context_entity *c_entity,
				    void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	if (sched_job->prepare_job)
		r = sched_job->prepare_job(sched_job);
	if (r) {
		DRM_ERROR("Prepare job error\n");
		/* preparation failed: hand the job to job_work for cleanup */
		schedule_work(&sched_job->job_work);
	}
	return r;
}
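/*
 * Emit the job's IBs to the ring under job_lock; on any failure the job
 * is handed to job_work for deferred cleanup instead of being emitted.
 */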
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
				 struct amd_context_entity *c_entity,
				 void *job)
{
	int r = 0;
	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;

	mutex_lock(&sched_job->job_lock);
	r = amdgpu_ib_schedule(sched_job->adev,
			       sched_job->num_ibs,
			       sched_job->ibs,
			       sched_job->filp);
	if (r)
		goto err;
	if (sched_job->run_job) {
		r = sched_job->run_job(sched_job);
		if (r)
			goto err;
	}
	/* publish the sequence number of the last emitted IB and wake
	 * anyone blocked in amd_sched_wait_emit() */
	atomic64_set(&c_entity->last_emitted_v_seq,
		     sched_job->ibs[sched_job->num_ibs - 1].sequence);
	wake_up_all(&c_entity->wait_emit);

	mutex_unlock(&sched_job->job_lock);
	return;
err:
	DRM_ERROR("Run job error\n");
	mutex_unlock(&sched_job->job_lock);
	schedule_work(&sched_job->job_work);
}
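/*
 * Fence-completion callback: wakes sequence-number waiters on the job's
 * context entity and defers the final free of the job to job_work.
 */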
static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
{
	struct amdgpu_cs_parser *sched_job = NULL;
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_device *adev = NULL;
	struct amd_context_entity *c_entity = NULL;

	if (!job)
		return;
	sched_job = (struct amdgpu_cs_parser *)job;
	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
	if (!fence)
		return;
	ring = fence->ring;
	adev = ring->adev;
	c_entity = &sched_job->ctx->rings[ring->idx].c_entity;

	/* wake up users waiting for time stamp */
	wake_up_all(&c_entity->wait_queue);

	schedule_work(&sched_job->job_work);
}
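/* entry points the shared scheduler core invokes for amdgpu jobs */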
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
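/*
 * Submit kernel-generated IBs on behalf of the driver itself: either
 * queue them on the kernel context's entity and wait for the scheduler
 * to emit them, or, with the scheduler disabled, schedule them directly
 * via amdgpu_ib_schedule().
 */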
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_cs_parser *),
					 void *owner)
{
	int r = 0;

	if (amdgpu_enable_scheduler) {
		uint64_t v_seq;
		struct amdgpu_cs_parser *sched_job =
			amdgpu_cs_parser_create(adev, owner,
						adev->kernel_ctx, ibs, 1);
		if (!sched_job)
			return -ENOMEM;
		sched_job->free_job = free_job;
		/* reserve the next sequence number on the kernel entity and
		 * stamp it into the last IB so waiters can track emission */
		v_seq = atomic64_inc_return(
			&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
		ibs[num_ibs - 1].sequence = v_seq;
		amd_sched_push_job(ring->scheduler,
				   &adev->kernel_ctx->rings[ring->idx].c_entity,
				   sched_job);
		r = amd_sched_wait_emit(
			&adev->kernel_ctx->rings[ring->idx].c_entity,
			v_seq, false, -1);
		if (r)
			WARN(true, "emit timeout\n");
	} else
		r = amdgpu_ib_schedule(adev, 1, ibs, owner);
	return r;
}
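/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * my_free_job is an assumed name, and the IB is assumed to be already
 * allocated and filled):
 *
 *	static int my_free_job(struct amdgpu_cs_parser *sched_job)
 *	{
 *		amdgpu_ib_free(sched_job->adev, sched_job->ibs);
 *		return 0;
 *	}
 *
 *	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1,
 *						 my_free_job,
 *						 AMDGPU_FENCE_OWNER_UNDEFINED);
 */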